// three.webgpu.js (bundled build artifact, ~1.68 MB)
/**
* @license
* Copyright 2010-2025 Three.js Authors
* SPDX-License-Identifier: MIT
*/
import { Color, Vector2, Vector3, Vector4, Matrix2, Matrix3, Matrix4, EventDispatcher, MathUtils, WebGLCoordinateSystem, WebGPUCoordinateSystem, ColorManagement, SRGBTransfer, NoToneMapping, StaticDrawUsage, InterleavedBuffer, DynamicDrawUsage, InterleavedBufferAttribute, NoColorSpace, UnsignedIntType, IntType, BackSide, CubeReflectionMapping, CubeRefractionMapping, TangentSpaceNormalMap, ObjectSpaceNormalMap, InstancedInterleavedBuffer, InstancedBufferAttribute, DataArrayTexture, FloatType, FramebufferTexture, LinearMipmapLinearFilter, DepthTexture, Material, NormalBlending, LineBasicMaterial, LineDashedMaterial, NoBlending, MeshNormalMaterial, WebGLCubeRenderTarget, BoxGeometry, Mesh, Scene, LinearFilter, CubeCamera, CubeTexture, EquirectangularReflectionMapping, EquirectangularRefractionMapping, AddOperation, MixOperation, MultiplyOperation, MeshBasicMaterial, MeshLambertMaterial, MeshPhongMaterial, Texture, MeshStandardMaterial, MeshPhysicalMaterial, MeshToonMaterial, MeshMatcapMaterial, SpriteMaterial, PointsMaterial, ShadowMaterial, arrayNeedsUint32, Uint32BufferAttribute, Uint16BufferAttribute, DoubleSide, Camera, DepthStencilFormat, DepthFormat, UnsignedInt248Type, UnsignedByteType, RenderTarget, Plane, Object3D, HalfFloatType, LinearMipMapLinearFilter, OrthographicCamera, BufferGeometry, Float32BufferAttribute, BufferAttribute, UVMapping, Euler, LinearSRGBColorSpace, LessCompare, VSMShadowMap, RGFormat, BasicShadowMap, SphereGeometry, CubeUVReflectionMapping, PerspectiveCamera, RGBAFormat, LinearMipmapNearestFilter, NearestMipmapLinearFilter, Float16BufferAttribute, REVISION, ArrayCamera, WebXRController, RAD2DEG, SRGBColorSpace, PCFShadowMap, FrontSide, Frustum, DataTexture, RedIntegerFormat, RedFormat, RGIntegerFormat, RGBIntegerFormat, RGBFormat, RGBAIntegerFormat, UnsignedShortType, ByteType, ShortType, warnOnce, createCanvasElement, AddEquation, SubtractEquation, ReverseSubtractEquation, ZeroFactor, OneFactor, SrcColorFactor, SrcAlphaFactor, 
SrcAlphaSaturateFactor, DstColorFactor, DstAlphaFactor, OneMinusSrcColorFactor, OneMinusSrcAlphaFactor, OneMinusDstColorFactor, OneMinusDstAlphaFactor, CullFaceNone, CullFaceBack, CullFaceFront, CustomBlending, MultiplyBlending, SubtractiveBlending, AdditiveBlending, NotEqualDepth, GreaterDepth, GreaterEqualDepth, EqualDepth, LessEqualDepth, LessDepth, AlwaysDepth, NeverDepth, UnsignedShort4444Type, UnsignedShort5551Type, UnsignedInt5999Type, AlphaFormat, LuminanceFormat, LuminanceAlphaFormat, RGB_S3TC_DXT1_Format, RGBA_S3TC_DXT1_Format, RGBA_S3TC_DXT3_Format, RGBA_S3TC_DXT5_Format, RGB_PVRTC_4BPPV1_Format, RGB_PVRTC_2BPPV1_Format, RGBA_PVRTC_4BPPV1_Format, RGBA_PVRTC_2BPPV1_Format, RGB_ETC1_Format, RGB_ETC2_Format, RGBA_ETC2_EAC_Format, RGBA_ASTC_4x4_Format, RGBA_ASTC_5x4_Format, RGBA_ASTC_5x5_Format, RGBA_ASTC_6x5_Format, RGBA_ASTC_6x6_Format, RGBA_ASTC_8x5_Format, RGBA_ASTC_8x6_Format, RGBA_ASTC_8x8_Format, RGBA_ASTC_10x5_Format, RGBA_ASTC_10x6_Format, RGBA_ASTC_10x8_Format, RGBA_ASTC_10x10_Format, RGBA_ASTC_12x10_Format, RGBA_ASTC_12x12_Format, RGBA_BPTC_Format, RED_RGTC1_Format, SIGNED_RED_RGTC1_Format, RED_GREEN_RGTC2_Format, SIGNED_RED_GREEN_RGTC2_Format, RepeatWrapping, ClampToEdgeWrapping, MirroredRepeatWrapping, NearestFilter, NearestMipmapNearestFilter, NeverCompare, AlwaysCompare, LessEqualCompare, EqualCompare, GreaterEqualCompare, GreaterCompare, NotEqualCompare, NotEqualStencilFunc, GreaterStencilFunc, GreaterEqualStencilFunc, EqualStencilFunc, LessEqualStencilFunc, LessStencilFunc, AlwaysStencilFunc, NeverStencilFunc, DecrementWrapStencilOp, IncrementWrapStencilOp, DecrementStencilOp, IncrementStencilOp, InvertStencilOp, ReplaceStencilOp, ZeroStencilOp, KeepStencilOp, MaxEquation, MinEquation, SpotLight, PointLight, DirectionalLight, RectAreaLight, AmbientLight, HemisphereLight, LightProbe, LinearToneMapping, ReinhardToneMapping, CineonToneMapping, ACESFilmicToneMapping, AgXToneMapping, NeutralToneMapping, Group, Loader, FileLoader, MaterialLoader, 
ObjectLoader } from './three.core.js';
export { AdditiveAnimationBlendMode, AnimationAction, AnimationClip, AnimationLoader, AnimationMixer, AnimationObjectGroup, AnimationUtils, ArcCurve, ArrowHelper, AttachedBindMode, Audio, AudioAnalyser, AudioContext, AudioListener, AudioLoader, AxesHelper, BasicDepthPacking, BatchedMesh, Bone, BooleanKeyframeTrack, Box2, Box3, Box3Helper, BoxHelper, BufferGeometryLoader, Cache, CameraHelper, CanvasTexture, CapsuleGeometry, CatmullRomCurve3, CircleGeometry, Clock, ColorKeyframeTrack, CompressedArrayTexture, CompressedCubeTexture, CompressedTexture, CompressedTextureLoader, ConeGeometry, ConstantAlphaFactor, ConstantColorFactor, Controls, CubeTextureLoader, CubicBezierCurve, CubicBezierCurve3, CubicInterpolant, CullFaceFrontBack, Curve, CurvePath, CustomToneMapping, CylinderGeometry, Cylindrical, Data3DTexture, DataTextureLoader, DataUtils, DefaultLoadingManager, DetachedBindMode, DirectionalLightHelper, DiscreteInterpolant, DodecahedronGeometry, DynamicCopyUsage, DynamicReadUsage, EdgesGeometry, EllipseCurve, ExtrudeGeometry, Fog, FogExp2, GLBufferAttribute, GLSL1, GLSL3, GridHelper, HemisphereLightHelper, IcosahedronGeometry, ImageBitmapLoader, ImageLoader, ImageUtils, InstancedBufferGeometry, InstancedMesh, Int16BufferAttribute, Int32BufferAttribute, Int8BufferAttribute, Interpolant, InterpolateDiscrete, InterpolateLinear, InterpolateSmooth, KeyframeTrack, LOD, LatheGeometry, Layers, Light, Line, Line3, LineCurve, LineCurve3, LineLoop, LineSegments, LinearInterpolant, LinearMipMapNearestFilter, LinearTransfer, LoaderUtils, LoadingManager, LoopOnce, LoopPingPong, LoopRepeat, MOUSE, MeshDepthMaterial, MeshDistanceMaterial, NearestMipMapLinearFilter, NearestMipMapNearestFilter, NormalAnimationBlendMode, NumberKeyframeTrack, OctahedronGeometry, OneMinusConstantAlphaFactor, OneMinusConstantColorFactor, PCFSoftShadowMap, Path, PlaneGeometry, PlaneHelper, PointLightHelper, Points, PolarGridHelper, PolyhedronGeometry, PositionalAudio, PropertyBinding, PropertyMixer, 
QuadraticBezierCurve, QuadraticBezierCurve3, Quaternion, QuaternionKeyframeTrack, QuaternionLinearInterpolant, RGBADepthPacking, RGBDepthPacking, RGB_BPTC_SIGNED_Format, RGB_BPTC_UNSIGNED_Format, RGDepthPacking, RawShaderMaterial, Ray, Raycaster, RenderTarget3D, RenderTargetArray, RingGeometry, ShaderMaterial, Shape, ShapeGeometry, ShapePath, ShapeUtils, Skeleton, SkeletonHelper, SkinnedMesh, Source, Sphere, Spherical, SphericalHarmonics3, SplineCurve, SpotLightHelper, Sprite, StaticCopyUsage, StaticReadUsage, StereoCamera, StreamCopyUsage, StreamDrawUsage, StreamReadUsage, StringKeyframeTrack, TOUCH, TetrahedronGeometry, TextureLoader, TextureUtils, TimestampQuery, TorusGeometry, TorusKnotGeometry, Triangle, TriangleFanDrawMode, TriangleStripDrawMode, TrianglesDrawMode, TubeGeometry, Uint8BufferAttribute, Uint8ClampedBufferAttribute, Uniform, UniformsGroup, VectorKeyframeTrack, VideoFrameTexture, VideoTexture, WebGL3DRenderTarget, WebGLArrayRenderTarget, WebGLRenderTarget, WireframeGeometry, WrapAroundEnding, ZeroCurvatureEnding, ZeroSlopeEnding } from './three.core.js';
/**
 * The material properties (uniform sources) that {@link NodeMaterialObserver}
 * monitors for per-frame changes. Grouped by feature for readability; the
 * order of entries is part of the observed data layout and must not change.
 *
 * @type {Array<String>}
 */
const refreshUniforms = [
	'alphaMap', 'alphaTest',
	'anisotropy', 'anisotropyMap', 'anisotropyRotation',
	'aoMap', 'attenuationColor', 'attenuationDistance', 'bumpMap',
	'clearcoat', 'clearcoatMap', 'clearcoatNormalMap', 'clearcoatNormalScale', 'clearcoatRoughness',
	'color', 'dispersion', 'displacementMap',
	'emissive', 'emissiveMap', 'envMap', 'gradientMap', 'ior',
	'iridescence', 'iridescenceIOR', 'iridescenceMap', 'iridescenceThicknessMap',
	'lightMap', 'map', 'matcap',
	'metalness', 'metalnessMap',
	'normalMap', 'normalScale', 'opacity',
	'roughness', 'roughnessMap',
	'sheen', 'sheenColor', 'sheenColorMap', 'sheenRoughnessMap',
	'shininess', 'specular', 'specularColor', 'specularColorMap',
	'specularIntensity', 'specularIntensityMap', 'specularMap',
	'thickness', 'transmission', 'transmissionMap'
];
/**
 * This class is used by {@link WebGPURenderer} as management component.
 * It's primary purpose is to determine whether render objects require a
 * refresh right before they are going to be rendered or not.
 */
class NodeMaterialObserver {

	/**
	 * Constructs a new node material observer.
	 *
	 * @param {NodeBuilder} builder - The node builder.
	 */
	constructor( builder ) {

		/**
		 * A node material can be used by more than one render object so the
		 * monitor must maintain a list of render objects.
		 *
		 * @type {WeakMap<RenderObject,Object>}
		 */
		this.renderObjects = new WeakMap();

		/**
		 * Whether the material uses node objects or not.
		 *
		 * @type {Boolean}
		 */
		this.hasNode = this.containsNode( builder );

		/**
		 * Whether the node builder's 3D object is animated or not.
		 *
		 * @type {Boolean}
		 */
		this.hasAnimation = builder.object.isSkinnedMesh === true;

		/**
		 * A list of all possible material uniforms
		 *
		 * @type {Array<String>}
		 */
		this.refreshUniforms = refreshUniforms;

		/**
		 * Holds the current render ID from the node frame.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.renderId = 0;

	}

	/**
	 * Returns `true` if the given render object is verified for the first time of this observer.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 * @return {Boolean} Whether the given render object is verified for the first time of this observer.
	 */
	firstInitialization( renderObject ) {

		const hasInitialized = this.renderObjects.has( renderObject );

		if ( hasInitialized === false ) {

			this.getRenderObjectData( renderObject );

			return true;

		}

		return false;

	}

	/**
	 * Returns monitoring data for the given render object. The data is created
	 * lazily on first access and cached in the internal WeakMap.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 * @return {Object} The monitoring data.
	 */
	getRenderObjectData( renderObject ) {

		let data = this.renderObjects.get( renderObject );

		if ( data === undefined ) {

			const { geometry, material, object } = renderObject;

			data = {
				material: this.getMaterialData( material ),
				geometry: {
					id: geometry.id,
					attributes: this.getAttributesData( geometry.attributes ),
					indexVersion: geometry.index ? geometry.index.version : null,
					drawRange: { start: geometry.drawRange.start, count: geometry.drawRange.count }
				},
				worldMatrix: object.matrixWorld.clone()
			};

			if ( object.center ) {

				data.center = object.center.clone();

			}

			if ( object.morphTargetInfluences ) {

				data.morphTargetInfluences = object.morphTargetInfluences.slice();

			}

			if ( renderObject.bundle !== null ) {

				data.version = renderObject.bundle.version;

			}

			// transmissive materials render against a backdrop buffer whose size must be tracked
			if ( data.material.transmission > 0 ) {

				const { width, height } = renderObject.context;

				data.bufferWidth = width;
				data.bufferHeight = height;

			}

			this.renderObjects.set( renderObject, data );

		}

		return data;

	}

	/**
	 * Returns an attribute data structure holding the attributes versions for
	 * monitoring.
	 *
	 * @param {Object} attributes - The geometry attributes.
	 * @return {Object} An object for monitoring the versions of attributes.
	 */
	getAttributesData( attributes ) {

		const attributesData = {};

		for ( const name in attributes ) {

			const attribute = attributes[ name ];

			attributesData[ name ] = {
				version: attribute.version
			};

		}

		return attributesData;

	}

	/**
	 * Returns `true` if the node builder's material uses
	 * node properties.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Boolean} Whether the node builder's material uses node properties or not.
	 */
	containsNode( builder ) {

		const material = builder.material;

		for ( const property in material ) {

			if ( material[ property ] && material[ property ].isNode )
				return true;

		}

		// renderer-level node overrides also force the node path
		if ( builder.renderer.nodes.modelViewMatrix !== null || builder.renderer.nodes.modelNormalViewMatrix !== null )
			return true;

		return false;

	}

	/**
	 * Returns a material data structure holding the material property values for
	 * monitoring. Textures are tracked by id/version; clonable values (colors,
	 * vectors) are deep-copied so later mutations can be detected.
	 *
	 * @param {Material} material - The material.
	 * @return {Object} An object for monitoring material properties.
	 */
	getMaterialData( material ) {

		const data = {};

		for ( const property of this.refreshUniforms ) {

			const value = material[ property ];

			if ( value === null || value === undefined ) continue;

			if ( typeof value === 'object' && value.clone !== undefined ) {

				if ( value.isTexture === true ) {

					data[ property ] = { id: value.id, version: value.version };

				} else {

					data[ property ] = value.clone();

				}

			} else {

				data[ property ] = value;

			}

		}

		return data;

	}

	/**
	 * Returns `true` if the given render object has not changed its state.
	 * Note: whenever a difference is detected, the stored monitoring data is
	 * synchronized with the current state before returning `false`, so the
	 * next call reports "equal" again.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 * @return {Boolean} Whether the given render object has changed its state or not.
	 */
	equals( renderObject ) {

		const { object, material, geometry } = renderObject;

		const renderObjectData = this.getRenderObjectData( renderObject );

		// world matrix

		if ( renderObjectData.worldMatrix.equals( object.matrixWorld ) !== true ) {

			renderObjectData.worldMatrix.copy( object.matrixWorld );

			return false;

		}

		// material

		const materialData = renderObjectData.material;

		for ( const property in materialData ) {

			const value = materialData[ property ];

			const mtlValue = material[ property ];

			if ( value.equals !== undefined ) {

				if ( value.equals( mtlValue ) === false ) {

					value.copy( mtlValue );

					return false;

				}

			} else if ( mtlValue.isTexture === true ) {

				if ( value.id !== mtlValue.id || value.version !== mtlValue.version ) {

					value.id = mtlValue.id;
					value.version = mtlValue.version;

					return false;

				}

			} else if ( value !== mtlValue ) {

				materialData[ property ] = mtlValue;

				return false;

			}

		}

		if ( materialData.transmission > 0 ) {

			const { width, height } = renderObject.context;

			if ( renderObjectData.bufferWidth !== width || renderObjectData.bufferHeight !== height ) {

				renderObjectData.bufferWidth = width;
				renderObjectData.bufferHeight = height;

				return false;

			}

		}

		// geometry

		const storedGeometryData = renderObjectData.geometry;
		const attributes = geometry.attributes;

		const storedAttributes = storedGeometryData.attributes;
		const storedAttributeNames = Object.keys( storedAttributes );
		const currentAttributeNames = Object.keys( attributes );

		if ( storedGeometryData.id !== geometry.id ) {

			storedGeometryData.id = geometry.id;

			return false;

		}

		if ( storedAttributeNames.length !== currentAttributeNames.length ) {

			renderObjectData.geometry.attributes = this.getAttributesData( attributes );

			return false;

		}

		// compare each attribute

		for ( const name of storedAttributeNames ) {

			const storedAttributeData = storedAttributes[ name ];

			const attribute = attributes[ name ];

			if ( attribute === undefined ) {

				// attribute was removed

				delete storedAttributes[ name ];

				return false;

			}

			if ( storedAttributeData.version !== attribute.version ) {

				storedAttributeData.version = attribute.version;

				return false;

			}

		}

		// check index

		const index = geometry.index;
		const storedIndexVersion = storedGeometryData.indexVersion;
		const currentIndexVersion = index ? index.version : null;

		if ( storedIndexVersion !== currentIndexVersion ) {

			storedGeometryData.indexVersion = currentIndexVersion;

			return false;

		}

		// check drawRange

		if ( storedGeometryData.drawRange.start !== geometry.drawRange.start || storedGeometryData.drawRange.count !== geometry.drawRange.count ) {

			storedGeometryData.drawRange.start = geometry.drawRange.start;
			storedGeometryData.drawRange.count = geometry.drawRange.count;

			return false;

		}

		// morph targets

		if ( renderObjectData.morphTargetInfluences ) {

			let morphChanged = false;

			for ( let i = 0; i < renderObjectData.morphTargetInfluences.length; i ++ ) {

				if ( renderObjectData.morphTargetInfluences[ i ] !== object.morphTargetInfluences[ i ] ) {

					morphChanged = true;

				}

			}

			if ( morphChanged ) {

				// FIX: a changed influence means the object is NOT equal anymore;
				// sync the stored influences and report the state change.
				renderObjectData.morphTargetInfluences = object.morphTargetInfluences.slice();

				return false;

			}

		}

		// center

		if ( renderObjectData.center ) {

			if ( renderObjectData.center.equals( object.center ) === false ) {

				renderObjectData.center.copy( object.center );

				// FIX: a moved center is a state change and must trigger a refresh.
				return false;

			}

		}

		// bundle

		if ( renderObject.bundle !== null ) {

			renderObjectData.version = renderObject.bundle.version;

		}

		return true;

	}

	/**
	 * Checks if the given render object requires a refresh.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 * @param {NodeFrame} nodeFrame - The current node frame.
	 * @return {Boolean} Whether the given render object requires a refresh or not.
	 */
	needsRefresh( renderObject, nodeFrame ) {

		if ( this.hasNode || this.hasAnimation || this.firstInitialization( renderObject ) )
			return true;

		const { renderId } = nodeFrame;

		if ( this.renderId !== renderId ) {

			this.renderId = renderId;

			return true;

		}

		const isStatic = renderObject.object.static === true;
		const isBundle = renderObject.bundle !== null && renderObject.bundle.static === true && this.getRenderObjectData( renderObject ).version === renderObject.bundle.version;

		if ( isStatic || isBundle )
			return false;

		const notEqual = this.equals( renderObject ) !== true;

		return notEqual;

	}

}
/** @module NodeUtils **/
// cyrb53 (c) 2018 bryc (github.com/bryc). License: Public domain. Attribution appreciated.
// A fast and simple 64-bit (or 53-bit) string hash function with decent collision resistance.
// Largely inspired by MurmurHash2/3, but with a focus on speed/simplicity.
// See https://stackoverflow.com/questions/7616461/generate-a-hash-from-string-in-javascript/52171480#52171480
// https://github.com/bryc/code/blob/master/jshash/experimental/cyrb53.js
// cyrb53 (c) 2018 bryc (github.com/bryc). License: Public domain. Attribution appreciated.
// A fast and simple 64-bit (or 53-bit) string hash function with decent collision resistance,
// inspired by MurmurHash2/3. Accepts either a string (hashed per UTF-16 code unit)
// or an array of numbers (hashed per element).
// See https://github.com/bryc/code/blob/master/jshash/experimental/cyrb53.js
function cyrb53( value, seed = 0 ) {

	let h1 = 0xdeadbeef ^ seed;
	let h2 = 0x41c6ce57 ^ seed;

	// fold one word into both accumulators
	const mix = ( word ) => {

		h1 = Math.imul( h1 ^ word, 2654435761 );
		h2 = Math.imul( h2 ^ word, 1597334677 );

	};

	if ( value instanceof Array ) {

		for ( let i = 0; i < value.length; i ++ ) mix( value[ i ] );

	} else {

		// index loop (not for...of) so surrogate pairs hash per code unit, as before
		for ( let i = 0; i < value.length; i ++ ) mix( value.charCodeAt( i ) );

	}

	// finalization: avalanche both halves (order of these four steps matters)
	h1 = Math.imul( h1 ^ ( h1 >>> 16 ), 2246822507 );
	h1 ^= Math.imul( h2 ^ ( h2 >>> 13 ), 3266489909 );
	h2 = Math.imul( h2 ^ ( h2 >>> 16 ), 2246822507 );
	h2 ^= Math.imul( h1 ^ ( h1 >>> 13 ), 3266489909 );

	return 4294967296 * ( 2097151 & h2 ) + ( h1 >>> 0 );

}
/**
 * Computes a hash for the given string.
 *
 * @method
 * @param {String} str - The string to be hashed.
 * @return {Number} The hash.
 */
const hashString = function ( str ) {

	return cyrb53( str );

};

/**
 * Computes a hash for the given array.
 *
 * @method
 * @param {Array<Number>} array - The array to be hashed.
 * @return {Number} The hash.
 */
const hashArray = function ( array ) {

	return cyrb53( array );

};

/**
 * Computes a hash for the given list of parameters.
 *
 * @method
 * @param {...Number} params - A list of parameters.
 * @return {Number} The hash.
 */
const hash$1 = function ( ...params ) {

	return cyrb53( params );

};
/**
 * Computes a cache key for the given node.
 *
 * @method
 * @param {Object} object - The object to be hashed.
 * @param {Boolean} [force=false] - Whether to force a cache key computation or not.
 * @return {Number} The hash.
 */
function getCacheKey$1( object, force = false ) {

	const hashValues = [];

	if ( object.isNode === true ) {

		hashValues.push( object.id );

		// unwrap a potential Proxy to the actual node instance
		object = object.getSelf();

	}

	for ( const { property, childNode } of getNodeChildren( object ) ) {

		// strip the trailing 'Node' suffix from the property name before hashing
		const propertyHash = cyrb53( property.slice( 0, - 4 ) );

		hashValues.push( propertyHash, childNode.getCacheKey( force ) );

	}

	return cyrb53( hashValues );

}
/**
 * This generator function can be used to iterate over the node children
 * of the given object. Properties starting with `_` are treated as private
 * and skipped. Arrays and plain objects are searched one level deep.
 *
 * @generator
 * @param {Object} node - The object to be hashed.
 * @param {Boolean} [toJSON=false] - Whether to return JSON or not.
 * @yields {Object} A result node holding the property, index (if available) and the child node.
 */
function* getNodeChildren( node, toJSON = false ) {

	// A value qualifies as a child when it is a node or, in JSON mode, serializable.
	const isChild = ( candidate ) => candidate && ( candidate.isNode === true || ( toJSON && typeof candidate.toJSON === 'function' ) );

	for ( const property in node ) {

		if ( property.startsWith( '_' ) ) continue; // private property

		const value = node[ property ];

		if ( Array.isArray( value ) ) {

			for ( let i = 0; i < value.length; i ++ ) {

				if ( isChild( value[ i ] ) ) yield { property, index: i, childNode: value[ i ] };

			}

		} else if ( value && value.isNode === true ) {

			yield { property, childNode: value };

		} else if ( typeof value === 'object' ) {

			// note: typeof null === 'object', but for...in over null is a no-op
			for ( const subProperty in value ) {

				if ( isChild( value[ subProperty ] ) ) yield { property, index: subProperty, childNode: value[ subProperty ] };

			}

		}

	}

}
// Maps a component count to the corresponding TSL data type. Lengths without
// an entry (e.g. 6) yield `undefined` in getTypeFromLength().
const typeFromLength = /*@__PURE__*/ new Map( [
	[ 1, 'float' ],
	[ 2, 'vec2' ],
	[ 3, 'vec3' ],
	[ 4, 'vec4' ],
	[ 9, 'mat3' ],
	[ 16, 'mat4' ]
] );
// Per-object scratch data store used by getDataFromObject(). A WeakMap so
// entries are garbage-collected together with their key objects.
const dataFromObject = /*@__PURE__*/ new WeakMap();
/**
 * Returns the data type for the given the length. Only the component counts
 * present in the lookup table (1, 2, 3, 4, 9, 16) are supported; other
 * lengths yield `undefined`.
 *
 * @method
 * @param {Number} length - The length.
 * @return {String} The data type.
 */
function getTypeFromLength( length ) {

	const nodeType = typeFromLength.get( length );

	return nodeType;

}
/**
 * Returns the typed array for the given data type.
 *
 * @method
 * @param {String} type - The data type.
 * @return {TypedArray} The typed array.
 * @throws {Error} If the type has no typed-array representation.
 */
function getTypedArrayFromType( type ) {

	// Vectors: the (optional) prefix selects the component type.
	if ( /[iu]?vec\d/.test( type ) ) {

		return type.startsWith( 'ivec' ) ? Int32Array
			: type.startsWith( 'uvec' ) ? Uint32Array
			: Float32Array;

	}

	// Matrices and plain floats are always backed by Float32Array.
	if ( /mat\d/.test( type ) || /float/.test( type ) ) return Float32Array;

	// Scalar integers; 'uint' must be checked before 'int' (substring match).
	if ( /uint/.test( type ) ) return Uint32Array;
	if ( /int/.test( type ) ) return Int32Array;

	throw new Error( `THREE.NodeUtils: Unsupported type: ${type}` );

}
/**
 * Returns the length for the given data type.
 *
 * @method
 * @param {String} type - The data type.
 * @return {Number} The length, or `undefined` for unsupported types (an error
 * is logged in that case).
 */
function getLengthFromType( type ) {

	// Ordered rules: scalars must be matched before vector/matrix suffixes.
	const rules = [
		[ /float|int|uint/, 1 ],
		[ /vec2/, 2 ],
		[ /vec3/, 3 ],
		[ /vec4/, 4 ],
		[ /mat2/, 4 ],
		[ /mat3/, 9 ],
		[ /mat4/, 16 ]
	];

	for ( const [ pattern, length ] of rules ) {

		if ( pattern.test( type ) ) return length;

	}

	console.error( 'THREE.TSL: Unsupported type:', type );

}
/**
 * Returns the data type for the given value.
 *
 * @method
 * @param {Any} value - The value.
 * @return {String?} The data type, or `null` if it cannot be determined.
 */
function getValueType( value ) {

	if ( value === undefined || value === null ) return null;

	// nodes take precedence over any primitive classification
	if ( value.isNode === true ) return 'node';

	switch ( typeof value ) {

		case 'number': return 'float';
		case 'boolean': return 'bool';
		case 'string': return 'string';
		case 'function': return 'shader';

	}

	if ( value.isVector2 === true ) return 'vec2';
	if ( value.isVector3 === true ) return 'vec3';
	if ( value.isVector4 === true ) return 'vec4';
	if ( value.isMatrix2 === true ) return 'mat2';
	if ( value.isMatrix3 === true ) return 'mat3';
	if ( value.isMatrix4 === true ) return 'mat4';
	if ( value.isColor === true ) return 'color';
	if ( value instanceof ArrayBuffer ) return 'ArrayBuffer';

	return null;

}
/**
 * Returns the value/object for the given data type and parameters.
 *
 * @method
 * @param {String} type - The given type.
 * @param {...Any} params - A parameter list.
 * @return {Any} The value/object, or `null` for unknown types.
 */
function getValueFromType( type, ...params ) {

	const suffix = type ? type.slice( - 4 ) : undefined;

	if ( params.length === 1 ) { // ensure same behaviour as in NodeBuilder.format()

		// splat a single scalar across all vector components
		if ( suffix === 'vec2' ) params = [ params[ 0 ], params[ 0 ] ];
		else if ( suffix === 'vec3' ) params = [ params[ 0 ], params[ 0 ], params[ 0 ] ];
		else if ( suffix === 'vec4' ) params = [ params[ 0 ], params[ 0 ], params[ 0 ], params[ 0 ] ];

	}

	switch ( type ) {

		case 'color': return new Color( ...params );
		case 'bool': return params[ 0 ] || false;
		case 'float':
		case 'int':
		case 'uint': return params[ 0 ] || 0;
		case 'string': return params[ 0 ] || '';
		case 'ArrayBuffer': return base64ToArrayBuffer( params[ 0 ] );

	}

	// vector/matrix types are matched by their four-character suffix,
	// so prefixed variants (e.g. 'bvec2') resolve to the same classes
	switch ( suffix ) {

		case 'vec2': return new Vector2( ...params );
		case 'vec3': return new Vector3( ...params );
		case 'vec4': return new Vector4( ...params );
		case 'mat2': return new Matrix2( ...params );
		case 'mat3': return new Matrix3( ...params );
		case 'mat4': return new Matrix4( ...params );

	}

	return null;

}
/**
 * Gets the object data that can be shared between different rendering steps.
 * A fresh (empty) data object is created and cached on first access.
 *
 * @param {Object} object - The object to get the data for.
 * @return {Object} The object data.
 */
function getDataFromObject( object ) {

	const cached = dataFromObject.get( object );

	if ( cached !== undefined ) return cached;

	const data = {};
	dataFromObject.set( object, data );

	return data;

}
/**
 * Converts the given array buffer to a Base64 string.
 *
 * @method
 * @param {ArrayBuffer} arrayBuffer - The array buffer.
 * @return {String} The Base64 string.
 */
function arrayBufferToBase64( arrayBuffer ) {

	const bytes = new Uint8Array( arrayBuffer );

	// build a binary string (one char per byte) for btoa()
	let binary = '';

	for ( const byte of bytes ) {

		binary += String.fromCharCode( byte );

	}

	return btoa( binary );

}
/**
 * Converts the given Base64 string to an array buffer.
 *
 * @method
 * @param {String} base64 - The Base64 string.
 * @return {ArrayBuffer} The array buffer.
 */
function base64ToArrayBuffer( base64 ) {

	const binary = atob( base64 );
	const bytes = new Uint8Array( binary.length );

	for ( let i = 0; i < binary.length; i ++ ) {

		bytes[ i ] = binary.charCodeAt( i );

	}

	return bytes.buffer;

}
// Namespace object bundling the NodeUtils helpers. Frozen (with a null
// prototype) so the public surface cannot be mutated at runtime.
var NodeUtils = /*#__PURE__*/ Object.freeze( {
	__proto__: null,
	arrayBufferToBase64,
	base64ToArrayBuffer,
	getCacheKey: getCacheKey$1,
	getDataFromObject,
	getLengthFromType,
	getNodeChildren,
	getTypeFromLength,
	getTypedArrayFromType,
	getValueFromType,
	getValueType,
	hash: hash$1,
	hashArray,
	hashString
} );
/** @module NodeConstants **/
/**
 * Possible shader stages.
 *
 * @property {string} VERTEX The vertex shader stage.
 * @property {string} FRAGMENT The fragment shader stage.
 */
const NodeShaderStage = {
	VERTEX: 'vertex',
	FRAGMENT: 'fragment'
};
/**
 * Update types of a node.
 *
 * @property {string} NONE The update method is not executed.
 * @property {string} FRAME The update method is executed per frame.
 * @property {string} RENDER The update method is executed per render. A frame might be produced by multiple render calls so this value allows more detailed updates than FRAME.
 * @property {string} OBJECT The update method is executed per {@link Object3D} that uses the node for rendering.
 */
const NodeUpdateType = {
	NONE: 'none',
	FRAME: 'frame',
	RENDER: 'render',
	OBJECT: 'object'
};
/**
 * Data types of a node.
 *
 * @property {string} BOOLEAN Boolean type.
 * @property {string} INTEGER Integer type.
 * @property {string} FLOAT Float type.
 * @property {string} VECTOR2 Two-dimensional vector type.
 * @property {string} VECTOR3 Three-dimensional vector type.
 * @property {string} VECTOR4 Four-dimensional vector type.
 * @property {string} MATRIX2 2x2 matrix type.
 * @property {string} MATRIX3 3x3 matrix type.
 * @property {string} MATRIX4 4x4 matrix type.
 */
const NodeType = {
	BOOLEAN: 'bool',
	INTEGER: 'int',
	FLOAT: 'float',
	VECTOR2: 'vec2',
	VECTOR3: 'vec3',
	VECTOR4: 'vec4',
	MATRIX2: 'mat2',
	MATRIX3: 'mat3',
	MATRIX4: 'mat4'
};
/**
 * Access types of a node. These are relevant for compute and storage usage.
 *
 * @property {string} READ_ONLY Read-only access
 * @property {string} WRITE_ONLY Write-only access.
 * @property {string} READ_WRITE Read and write access.
 */
const NodeAccess = {
	READ_ONLY: 'readOnly',
	WRITE_ONLY: 'writeOnly',
	READ_WRITE: 'readWrite',
};
// Shader stages used for rasterization pipelines.
const defaultShaderStages = [ 'fragment', 'vertex' ];
// The ordered phases a node goes through while being built.
const defaultBuildStages = [ 'setup', 'analyze', 'generate' ];
// All shader stages, including compute.
const shaderStages = [ ...defaultShaderStages, 'compute' ];
// Swizzle component names in declaration order.
const vectorComponents = [ 'x', 'y', 'z', 'w' ];
// Monotonically increasing counter used to assign each Node a unique `id`.
let _nodeId = 0;
/**
* Base class for all nodes.
*
* @augments EventDispatcher
*/
class Node extends EventDispatcher {
static get type() {

	// Base class type name; subclasses override this static getter.
	return 'Node';

}
/**
 * Constructs a new node.
 *
 * @param {String?} nodeType - The node type.
 */
constructor( nodeType = null ) {

	super();

	/**
	 * The node type. This represents the result type of the node (e.g. `float` or `vec3`).
	 *
	 * @type {String?}
	 * @default null
	 */
	this.nodeType = nodeType;

	/**
	 * The update type of the node's {@link Node#update} method. Possible values are listed in {@link NodeUpdateType}.
	 *
	 * @type {String}
	 * @default 'none'
	 */
	this.updateType = NodeUpdateType.NONE;

	/**
	 * The update type of the node's {@link Node#updateBefore} method. Possible values are listed in {@link NodeUpdateType}.
	 *
	 * @type {String}
	 * @default 'none'
	 */
	this.updateBeforeType = NodeUpdateType.NONE;

	/**
	 * The update type of the node's {@link Node#updateAfter} method. Possible values are listed in {@link NodeUpdateType}.
	 *
	 * @type {String}
	 * @default 'none'
	 */
	this.updateAfterType = NodeUpdateType.NONE;

	/**
	 * The UUID of the node.
	 *
	 * @type {String}
	 * @readonly
	 */
	this.uuid = MathUtils.generateUUID();

	/**
	 * The version of the node. The version automatically is increased when {@link Node#needsUpdate} is set to `true`.
	 *
	 * @type {Number}
	 * @readonly
	 * @default 0
	 */
	this.version = 0;

	/**
	 * Whether this node is global or not. This property is relevant for the internal
	 * node caching system. All nodes which should be declared just once should
	 * set this flag to `true` (a typical example is {@link AttributeNode}).
	 *
	 * @type {Boolean}
	 * @default false
	 */
	this.global = false;

	/**
	 * This flag can be used for type testing.
	 *
	 * @type {Boolean}
	 * @readonly
	 * @default true
	 */
	this.isNode = true;

	// private

	/**
	 * The cache key of this node.
	 *
	 * @private
	 * @type {Number?}
	 * @default null
	 */
	this._cacheKey = null;

	/**
	 * The cache key's version.
	 *
	 * @private
	 * @type {Number}
	 * @default 0
	 */
	this._cacheKeyVersion = 0;

	// `id` is defined via defineProperty so it is non-writable and non-enumerable
	// (enumeration matters: getNodeChildren() iterates own properties).
	Object.defineProperty( this, 'id', { value: _nodeId ++ } );

}
/**
* Set this property to `true` when the node should be regenerated.
*
* @type {Boolean}
* @default false
* @param {boolean} value
*/
set needsUpdate( value ) {
if ( value === true ) {
this.version ++;
}
}
/**
 * The type of the class. The value is usually the constructor name.
 *
 * @type {String}
 * @readonly
 */
get type() {

	// Delegates to the static getter so subclasses only override one place.
	return this.constructor.type;

}
/**
* Convenient method for defining {@link Node#update}.
*
* @param {Function} callback - The update method.
* @param {String} updateType - The update type.
* @return {Node} A reference to this node.
*/
onUpdate( callback, updateType ) {
this.updateType = updateType;
this.update = callback.bind( this.getSelf() );
return this;
}
/**
* Convenient method for defining {@link Node#update}. Similar to {@link Node#onUpdate}, but
* this method automatically sets the update type to `FRAME`.
*
* @param {Function} callback - The update method.
* @return {Node} A reference to this node.
*/
onFrameUpdate( callback ) {
return this.onUpdate( callback, NodeUpdateType.FRAME );
}
/**
* Convenient method for defining {@link Node#update}. Similar to {@link Node#onUpdate}, but
* this method automatically sets the update type to `RENDER`.
*
* @param {Function} callback - The update method.
* @return {Node} A reference to this node.
*/
onRenderUpdate( callback ) {
return this.onUpdate( callback, NodeUpdateType.RENDER );
}
/**
* Convenient method for defining {@link Node#update}. Similar to {@link Node#onUpdate}, but
* this method automatically sets the update type to `OBJECT`.
*
* @param {Function} callback - The update method.
* @return {Node} A reference to this node.
*/
onObjectUpdate( callback ) {
return this.onUpdate( callback, NodeUpdateType.OBJECT );
}
/**
* Convenient method for defining {@link Node#updateReference}.
*
* @param {Function} callback - The update method.
* @return {Node} A reference to this node.
*/
onReference( callback ) {
this.updateReference = callback.bind( this.getSelf() );
return this;
}
/**
* The `this` reference might point to a Proxy so this method can be used
* to get the reference to the actual node instance.
*
* @return {Node} A reference to the node.
*/
getSelf() {
// Returns non-node object.
return this.self || this;
}
/**
 * Nodes might refer to other objects like materials. This method allows to dynamically update the reference
 * to such objects based on a given state (e.g. the current node frame or builder).
 *
 * Returns `this` by default; derived classes (or {@link Node#onReference}) overwrite it.
 *
 * @param {Any} state - This method can be invoked in different contexts so `state` can refer to any object type.
 * @return {Any} The updated reference.
 */
updateReference( /*state*/ ) {
return this;
}
/**
* By default this method returns the value of the {@link Node#global} flag. This method
* can be overwritten in derived classes if an analytical way is required to determine the
* global status.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Boolean} Whether this node is global or not.
*/
isGlobal( /*builder*/ ) {
return this.global;
}
/**
* Generator function that can be used to iterate over the child nodes.
*
* @generator
* @yields {Node} A child node.
*/
* getChildren() {
for ( const { childNode } of getNodeChildren( this ) ) {
yield childNode;
}
}
/**
* Calling this method dispatches the `dispose` event. This event can be used
* to register event listeners for clean up tasks.
*/
dispose() {
this.dispatchEvent( { type: 'dispose' } );
}
/**
* Callback for {@link Node#traverse}.
*
* @callback traverseCallback
* @param {Node} node - The current node.
*/
/**
* Can be used to traverse through the node's hierarchy.
*
* @param {traverseCallback} callback - A callback that is executed per node.
*/
traverse( callback ) {
callback( this );
for ( const childNode of this.getChildren() ) {
childNode.traverse( callback );
}
}
/**
* Returns the cache key for this node.
*
* @param {Boolean} [force=false] - When set to `true`, a recomputation of the cache key is forced.
* @return {Number} The cache key of the node.
*/
getCacheKey( force = false ) {
force = force || this.version !== this._cacheKeyVersion;
if ( force === true || this._cacheKey === null ) {
this._cacheKey = hash$1( getCacheKey$1( this, force ), this.customCacheKey() );
this._cacheKeyVersion = this.version;
}
return this._cacheKey;
}
/**
 * Generates a custom cache key for this node. Derived classes can overwrite
 * this method so additional node state contributes to the key computed in
 * {@link Node#getCacheKey}.
 *
 * @return {Number} The custom cache key of the node (`0` by default).
 */
customCacheKey() {
return 0;
}
/**
 * Returns the reference to this node which is by default `this`. Derived
 * classes may overwrite this to delegate to another node.
 *
 * @return {Node} A reference to this node.
 */
getScope() {
return this;
}
/**
* Returns the hash of the node which is used to identify the node. By default it's
* the {@link Node#uuid} however derived node classes might have to overwrite this method
* depending on their implementation.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The hash.
*/
getHash( /*builder*/ ) {
return this.uuid;
}
/**
* Returns the update type of {@link Node#update}.
*
* @return {NodeUpdateType} The update type.
*/
getUpdateType() {
return this.updateType;
}
/**
* Returns the update type of {@link Node#updateBefore}.
*
* @return {NodeUpdateType} The update type.
*/
getUpdateBeforeType() {
return this.updateBeforeType;
}
/**
* Returns the update type of {@link Node#updateAfter}.
*
* @return {NodeUpdateType} The update type.
*/
getUpdateAfterType() {
return this.updateAfterType;
}
/**
* Certain types are composed of multiple elements. For example a `vec3`
* is composed of three `float` values. This method returns the type of
* these elements.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The type of the node.
*/
getElementType( builder ) {
const type = this.getNodeType( builder );
const elementType = builder.getElementType( type );
return elementType;
}
/**
 * Returns the node member type for the given name. Derived classes with
 * struct-like members overwrite this; the default is `'void'`.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @param {String} name - The name of the member.
 * @return {String} The type of the member (`'void'` by default).
 */
getMemberType( /*builder, name*/ ) {
return 'void';
}
/**
* Returns the node's type.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The type of the node.
*/
getNodeType( builder ) {
const nodeProperties = builder.getNodeProperties( this );
if ( nodeProperties.outputNode ) {
return nodeProperties.outputNode.getNodeType( builder );
}
return this.nodeType;
}
/**
* This method is used during the build process of a node and ensures
* equal nodes are not built multiple times but just once. For example if
* `attribute( 'uv' )` is used multiple times by the user, the build
* process makes sure to process just the first node.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node} The shared node if possible. Otherwise `this` is returned.
*/
getShared( builder ) {
const hash = this.getHash( builder );
const nodeFromHash = builder.getNodeFromHash( hash );
return nodeFromHash || this;
}
/**
* Represents the setup stage which is the first step of the build process, see {@link Node#build} method.
* This method is often overwritten in derived modules to prepare the node which is used as the output/result.
* The output node must be returned in the `return` statement.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node?} The output node.
*/
setup( builder ) {
const nodeProperties = builder.getNodeProperties( this );
let index = 0;
for ( const childNode of this.getChildren() ) {
nodeProperties[ 'node' + index ++ ] = childNode;
}
// return a outputNode if exists or null
return nodeProperties.outputNode || null;
}
/**
* Represents the analyze stage which is the second step of the build process, see {@link Node#build} method.
* This stage analyzes the node hierarchy and ensures descendent nodes are built.
*
* @param {NodeBuilder} builder - The current node builder.
*/
analyze( builder ) {
const usageCount = builder.increaseUsage( this );
if ( usageCount === 1 ) {
// node flow children
const nodeProperties = builder.getNodeProperties( this );
for ( const childNode of Object.values( nodeProperties ) ) {
if ( childNode && childNode.isNode === true ) {
childNode.build( builder );
}
}
}
}
/**
* Represents the generate stage which is the third step of the build process, see {@link Node#build} method.
* This state builds the output node and returns the resulting shader string.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {String?} output - Can be used to define the output type.
* @return {String?} The generated shader string.
*/
generate( builder, output ) {
const { outputNode } = builder.getNodeProperties( this );
if ( outputNode && outputNode.isNode === true ) {
return outputNode.build( builder, output );
}
}
/**
 * The method can be implemented to update the node's internal state before it is used to render an object.
 * The {@link Node#updateBeforeType} property defines how often the update is executed.
 * The default implementation only warns; subclasses that set an update-before type must overwrite it.
 *
 * @abstract
 * @param {NodeFrame} frame - A reference to the current node frame.
 * @return {Boolean?} An optional bool that indicates whether the implementation actually performed an update or not (e.g. due to caching).
 */
updateBefore( /*frame*/ ) {
console.warn( 'Abstract function.' );
}
/**
 * The method can be implemented to update the node's internal state after it was used to render an object.
 * The {@link Node#updateAfterType} property defines how often the update is executed.
 * The default implementation only warns; subclasses that set an update-after type must overwrite it.
 *
 * @abstract
 * @param {NodeFrame} frame - A reference to the current node frame.
 * @return {Boolean?} An optional bool that indicates whether the implementation actually performed an update or not (e.g. due to caching).
 */
updateAfter( /*frame*/ ) {
console.warn( 'Abstract function.' );
}
/**
 * The method can be implemented to update the node's internal state when it is used to render an object.
 * The {@link Node#updateType} property defines how often the update is executed.
 * The default implementation only warns; subclasses that set an update type must overwrite it.
 *
 * @abstract
 * @param {NodeFrame} frame - A reference to the current node frame.
 * @return {Boolean?} An optional bool that indicates whether the implementation actually performed an update or not (e.g. due to caching).
 */
update( /*frame*/ ) {
console.warn( 'Abstract function.' );
}
/**
 * This method performs the build of a node. The behavior of this method as well as its return value depend
 * on the current build stage (setup, analyze or generate).
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @param {String?} output - Can be used to define the output type.
 * @return {String?} When this method is executed in the setup or analyze stage, `null` is returned. In the generate stage, the generated shader string.
 */
build( builder, output = null ) {
// Delegate to an equal, already-registered node if one exists (deduplication).
const refNode = this.getShared( builder );
if ( this !== refNode ) {
return refNode.build( builder, output );
}
builder.addNode( this );
builder.addChain( this );
/* Build stages expected results:
- "setup" -> Node
- "analyze" -> null
- "generate" -> String
*/
let result = null;
const buildStage = builder.getBuildStage();
if ( buildStage === 'setup' ) {
this.updateReference( builder );
const properties = builder.getNodeProperties( this );
// Run setup only once per node; `initialized` guards against re-entry.
if ( properties.initialized !== true ) {
//const stackNodesBeforeSetup = builder.stack.nodes.length;
properties.initialized = true;
const outputNode = this.setup( builder ); // return a node or null
const isNodeOutput = outputNode && outputNode.isNode === true;
/*if ( isNodeOutput && builder.stack.nodes.length !== stackNodesBeforeSetup ) {
// !! no outputNode !!
//outputNode = builder.stack;
}*/
// Build all node-valued properties collected during setup.
for ( const childNode of Object.values( properties ) ) {
if ( childNode && childNode.isNode === true ) {
childNode.build( builder );
}
}
// Build the output node last and record it for later stages.
if ( isNodeOutput ) {
outputNode.build( builder );
}
properties.outputNode = outputNode;
}
} else if ( buildStage === 'analyze' ) {
this.analyze( builder );
} else if ( buildStage === 'generate' ) {
// A generate() with arity 1 ignores `output`, so its snippet can be cached and reused.
const isGenerateOnce = this.generate.length === 1;
if ( isGenerateOnce ) {
const type = this.getNodeType( builder );
const nodeData = builder.getDataFromNode( this );
result = nodeData.snippet;
if ( result === undefined ) {
result = this.generate( builder ) || '';
nodeData.snippet = result;
} else if ( nodeData.flowCodes !== undefined && builder.context.nodeBlock !== undefined ) {
// Snippet already generated; re-emit any flow code into the current block.
builder.addFlowCodeHierarchy( this, builder.context.nodeBlock );
}
result = builder.format( result, type, output );
} else {
result = this.generate( builder, output ) || '';
}
}
builder.removeChain( this );
builder.addSequentialNode( this );
return result;
}
/**
* Returns the child nodes as a JSON object.
*
* @return {Array<Object>} An iterable list of serialized child objects as JSON.
*/
getSerializeChildren() {
return getNodeChildren( this );
}
/**
 * Serializes the node to JSON. Child nodes are serialized into `json.meta`
 * and referenced by uuid via the `inputNodes` map.
 *
 * @param {Object} json - The output JSON object.
 */
serialize( json ) {
const nodeChildren = this.getSerializeChildren();
const inputNodes = {};
for ( const { property, index, childNode } of nodeChildren ) {
// Indexed children belong to an array or object container property.
if ( index !== undefined ) {
if ( inputNodes[ property ] === undefined ) {
// Integer indices imply an array container, otherwise a plain object.
inputNodes[ property ] = Number.isInteger( index ) ? [] : {};
}
inputNodes[ property ][ index ] = childNode.toJSON( json.meta ).uuid;
} else {
inputNodes[ property ] = childNode.toJSON( json.meta ).uuid;
}
}
// Only attach the map when the node actually has serialized children.
if ( Object.keys( inputNodes ).length > 0 ) {
json.inputNodes = inputNodes;
}
}
/**
 * Deserializes the node from the given JSON. Uuid references in `inputNodes`
 * are resolved against the node registry in `json.meta.nodes` and assigned
 * back onto this node's properties.
 *
 * @param {Object} json - The JSON object.
 */
deserialize( json ) {
if ( json.inputNodes !== undefined ) {
const nodes = json.meta.nodes;
for ( const property in json.inputNodes ) {
if ( Array.isArray( json.inputNodes[ property ] ) ) {
// Array container: resolve each uuid into a node array.
const inputArray = [];
for ( const uuid of json.inputNodes[ property ] ) {
inputArray.push( nodes[ uuid ] );
}
this[ property ] = inputArray;
} else if ( typeof json.inputNodes[ property ] === 'object' ) {
// Object container: resolve uuids keyed by sub-property name.
const inputObject = {};
for ( const subProperty in json.inputNodes[ property ] ) {
const uuid = json.inputNodes[ property ][ subProperty ];
inputObject[ subProperty ] = nodes[ uuid ];
}
this[ property ] = inputObject;
} else {
// Plain reference: a single uuid string.
const uuid = json.inputNodes[ property ];
this[ property ] = nodes[ uuid ];
}
}
}
}
/**
 * Serializes the node into the three.js JSON Object/Scene format. When called
 * without `meta` (root call), fresh caches are created and the collected
 * textures/images/nodes are inlined into the returned data.
 *
 * @param {Object?} meta - An optional JSON object that already holds serialized data from other scene objects.
 * @return {Object} The serialized node.
 */
toJSON( meta ) {
const { uuid, type } = this;
// A string or missing meta marks this invocation as the serialization root.
const isRoot = ( meta === undefined || typeof meta === 'string' );
if ( isRoot ) {
meta = {
textures: {},
images: {},
nodes: {}
};
}
// serialize (reuse cached data if this node was already serialized)
let data = meta.nodes[ uuid ];
if ( data === undefined ) {
data = {
uuid,
type,
meta,
metadata: {
version: 4.6,
type: 'Node',
generator: 'Node.toJSON'
}
};
// Non-root nodes are registered in the shared cache; `meta` is only
// carried temporarily so serialize() can reach it, then removed.
if ( isRoot !== true ) meta.nodes[ data.uuid ] = data;
this.serialize( data );
delete data.meta;
}
// TODO: Copied from Object3D.toJSON
function extractFromCache( cache ) {
const values = [];
for ( const key in cache ) {
const data = cache[ key ];
delete data.metadata;
values.push( data );
}
return values;
}
// The root call flattens the caches into the returned JSON.
if ( isRoot ) {
const textures = extractFromCache( meta.textures );
const images = extractFromCache( meta.images );
const nodes = extractFromCache( meta.nodes );
if ( textures.length > 0 ) data.textures = textures;
if ( images.length > 0 ) data.images = images;
if ( nodes.length > 0 ) data.nodes = nodes;
}
return data;
}
}
/**
 * Base class for representing element access on an array-like
 * node data structures.
 *
 * @augments Node
 */
class ArrayElementNode extends Node { // @TODO: If extending from TempNode it breaks webgpu_compute

	static get type() {

		return 'ArrayElementNode';

	}

	/**
	 * Constructs an array element node.
	 *
	 * @param {Node} node - The array-like node.
	 * @param {Node} indexNode - The index node that defines the element access.
	 */
	constructor( node, indexNode ) {

		super();

		/**
		 * The array-like node.
		 *
		 * @type {Node}
		 */
		this.node = node;

		/**
		 * The index node that defines the element access.
		 *
		 * @type {Node}
		 */
		this.indexNode = indexNode;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isArrayElementNode = true;

	}

	/**
	 * This method is overwritten since the node type is inferred from the array-like node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		return this.node.getElementType( builder );

	}

	/**
	 * Emits the subscript expression `array[ index ]` for the target shader language.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The generated shader string.
	 */
	generate( builder ) {

		const arraySnippet = this.node.build( builder );
		const elementIndexSnippet = this.indexNode.build( builder, 'uint' );

		return arraySnippet + '[ ' + elementIndexSnippet + ' ]';

	}

}
/**
 * This module is part of the TSL core and usually not used in app level code.
 * It represents a convert operation during the shader generation process
 * meaning it converts the data type of a node to a target data type.
 *
 * @augments Node
 */
class ConvertNode extends Node {

	static get type() {

		return 'ConvertNode';

	}

	/**
	 * Constructs a new convert node.
	 *
	 * @param {Node} node - The node which type should be converted.
	 * @param {String} convertTo - The target node type. Multiple types can be defined by separating them with a `|` sign.
	 */
	constructor( node, convertTo ) {

		super();

		/**
		 * The node which type should be converted.
		 *
		 * @type {Node}
		 */
		this.node = node;

		/**
		 * The target node type. Multiple types can be defined by separating them with a `|` sign.
		 *
		 * @type {String}
		 */
		this.convertTo = convertTo;

	}

	/**
	 * This method is overwritten since the implementation tries to infer the best
	 * matching type from the {@link ConvertNode#convertTo} property. Among the
	 * `|`-separated candidates, the last one whose type length matches the source
	 * node's type length wins; otherwise the first candidate is used.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		const sourceType = this.node.getNodeType( builder );
		const sourceLength = builder.getTypeLength( sourceType );

		let resolvedType = null;

		for ( const candidate of this.convertTo.split( '|' ) ) {

			if ( resolvedType === null || builder.getTypeLength( candidate ) === sourceLength ) {

				resolvedType = candidate;

			}

		}

		return resolvedType;

	}

	serialize( data ) {

		super.serialize( data );

		data.convertTo = this.convertTo;

	}

	deserialize( data ) {

		super.deserialize( data );

		this.convertTo = data.convertTo;

	}

	/**
	 * Builds the source node with the resolved target type and formats the result.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {String?} output - Can be used to define the output type.
	 * @return {String} The generated shader string.
	 */
	generate( builder, output ) {

		const resolvedType = this.getNodeType( builder );
		const sourceSnippet = this.node.build( builder, resolvedType );

		return builder.format( sourceSnippet, resolvedType, output );

	}

}
/**
 * This module uses cache management to create temporary variables
 * if the node is used more than once to prevent duplicate calculations.
 *
 * The class acts as a base class for many other nodes types.
 *
 * @augments Node
 */
class TempNode extends Node {
static get type() {
return 'TempNode';
}
/**
 * Constructs a temp node.
 *
 * @param {String?} nodeType - The node type.
 */
constructor( nodeType = null ) {
super( nodeType );
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isTempNode = true;
}
/**
 * Whether this node is used more than once in context of other nodes.
 *
 * @param {NodeBuilder} builder - The node builder.
 * @return {Boolean} A flag that indicates if there is more than one dependency to other nodes.
 */
hasDependencies( builder ) {
return builder.getDataFromNode( this ).usageCount > 1;
}
/**
 * Builds the node. In the generate stage, a node with multiple dependencies is
 * assigned to a temporary shader variable once; subsequent builds reuse that
 * variable's name instead of regenerating the expression.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @param {String?} output - Can be used to define the output type.
 * @return {String?} The generated shader string (in the generate stage).
 */
build( builder, output ) {
const buildStage = builder.getBuildStage();
if ( buildStage === 'generate' ) {
const type = builder.getVectorType( this.getNodeType( builder, output ) );
const nodeData = builder.getDataFromNode( this );
// A recorded property name means the temp variable was already emitted — reuse it.
if ( nodeData.propertyName !== undefined ) {
return builder.format( nodeData.propertyName, type, output );
} else if ( type !== 'void' && output !== 'void' && this.hasDependencies( builder ) ) {
// First build of a multi-use node: emit `tmp = <expr>` once and cache the name.
const snippet = super.build( builder, type );
const nodeVar = builder.getVarFromNode( this, null, type );
const propertyName = builder.getPropertyName( nodeVar );
builder.addLineFlowCode( `${ propertyName } = ${ snippet }`, this );
nodeData.snippet = snippet;
nodeData.propertyName = propertyName;
return builder.format( nodeData.propertyName, type, output );
}
}
// Single-use, void, or non-generate stage: fall back to the default build.
return super.build( builder, output );
}
}
/**
 * This module is part of the TSL core and usually not used in app level code.
 * It represents a join operation during the shader generation process.
 * For example it can compose/join two single floats into a `vec2` type.
 *
 * @augments TempNode
 */
class JoinNode extends TempNode {

	static get type() {

		return 'JoinNode';

	}

	/**
	 * Constructs a new join node.
	 *
	 * @param {Array<Node>} nodes - An array of nodes that should be joined.
	 * @param {String?} [nodeType=null] - The node type.
	 */
	constructor( nodes = [], nodeType = null ) {

		super( nodeType );

		/**
		 * An array of nodes that should be joined.
		 *
		 * @type {Array<Node>}
		 */
		this.nodes = nodes;

	}

	/**
	 * This method is overwritten since the node type must be inferred from the
	 * joined data length if not explicitly defined.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		if ( this.nodeType !== null ) return builder.getVectorType( this.nodeType );

		let totalLength = 0;

		for ( const node of this.nodes ) {

			totalLength += builder.getTypeLength( node.getNodeType( builder ) );

		}

		return builder.getTypeFromLength( totalLength );

	}

	/**
	 * Emits a constructor-style expression, e.g. `vec3( a, b, c )`, converting each
	 * input to the joined type's component type where necessary.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {String?} output - Can be used to define the output type.
	 * @return {String} The generated shader string.
	 */
	generate( builder, output ) {

		const type = this.getNodeType( builder );
		const componentType = builder.getComponentType( type );

		const snippets = this.nodes.map( ( node ) => {

			const snippet = node.build( builder );
			const nodeComponentType = builder.getComponentType( node.getNodeType( builder ) );

			// Convert mismatching component types to the joined component type.
			return nodeComponentType !== componentType
				? builder.format( snippet, nodeComponentType, componentType )
				: snippet;

		} );

		const joined = `${ builder.getType( type ) }( ${ snippets.join( ', ' ) } )`;

		return builder.format( joined, type, output );

	}

}
// Concatenation of all vector component names (presumably 'xyzw' — defined
// elsewhere in this file); used by SplitNode to detect no-op swizzles.
const _stringVectorComponents = vectorComponents.join( '' );
/**
 * This module is part of the TSL core and usually not used in app level code.
 * `SplitNode` represents a property access operation which means it is
 * used to implement any `.xyzw`, `.rgba` and `stpq` usage on node objects.
 * For example:
 * ```js
 * const redValue = color.r;
 * ```
 *
 * @augments Node
 */
class SplitNode extends Node {
static get type() {
return 'SplitNode';
}
/**
 * Constructs a new split node.
 *
 * @param {Node} node - The node that should be accessed.
 * @param {String} [components='x'] - The components that should be accessed.
 */
constructor( node, components = 'x' ) {
super();
/**
 * The node that should be accessed.
 *
 * @type {Node}
 */
this.node = node;
/**
 * The components that should be accessed.
 *
 * @type {string}
 */
this.components = components;
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isSplitNode = true;
}
/**
 * Returns the vector length which is computed based on the requested components.
 * The result is at least the number of requested components but grows to cover
 * the highest referenced component index (e.g. `'z'` alone requires length 3).
 *
 * @return {Number} The vector length.
 */
getVectorLength() {
let vectorLength = this.components.length;
for ( const c of this.components ) {
vectorLength = Math.max( vectorComponents.indexOf( c ) + 1, vectorLength );
}
return vectorLength;
}
/**
 * Returns the component type of the node's type.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The component type.
 */
getComponentType( builder ) {
return builder.getComponentType( this.node.getNodeType( builder ) );
}
/**
 * This method is overwritten since the node type is inferred from requested components.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The node type.
 */
getNodeType( builder ) {
return builder.getTypeFromLength( this.components.length, this.getComponentType( builder ) );
}
/**
 * Emits the swizzle expression. Scalar sources pass through unchanged; no-op
 * swizzles (e.g. `.xyz` on a vec3) skip the member access entirely.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @param {String?} output - Can be used to define the output type.
 * @return {String} The generated shader string.
 */
generate( builder, output ) {
const node = this.node;
const nodeTypeLength = builder.getTypeLength( node.getNodeType( builder ) );
let snippet = null;
if ( nodeTypeLength > 1 ) {
let type = null;
const componentsLength = this.getVectorLength();
if ( componentsLength >= nodeTypeLength ) {
// needed expand the input node
type = builder.getTypeFromLength( this.getVectorLength(), this.getComponentType( builder ) );
}
// `type` stays null when no expansion is needed — the node keeps its own type.
const nodeSnippet = node.build( builder, type );
if ( this.components.length === nodeTypeLength && this.components === _stringVectorComponents.slice( 0, this.components.length ) ) {
// unnecessary swizzle
snippet = builder.format( nodeSnippet, type, output );
} else {
snippet = builder.format( `${nodeSnippet}.${this.components}`, this.getNodeType( builder ), output );
}
} else {
// ignore .components if .node returns float/integer
snippet = node.build( builder, output );
}
return snippet;
}
serialize( data ) {
super.serialize( data );
data.components = this.components;
}
deserialize( data ) {
super.deserialize( data );
this.components = data.components;
}
}
/**
 * This module is part of the TSL core and usually not used in app level code.
 * `SetNode` represents a set operation which means it is used to implement any
 * `setXYZW()`, `setRGBA()` and `setSTPQ()` method invocations on node objects.
 * For example:
 * ```js
 * materialLine.colorNode = color( 0, 0, 0 ).setR( float( 1 ) );
 * ```
 *
 * @augments TempNode
 */
class SetNode extends TempNode {
static get type() {
return 'SetNode';
}
/**
 * Constructs a new set node.
 *
 * @param {Node} sourceNode - The node that should be updated.
 * @param {String} components - The components that should be updated (sorted into xyzw order by the caller).
 * @param {Node} targetNode - The value node.
 */
constructor( sourceNode, components, targetNode ) {
super();
/**
 * The node that should be updated.
 *
 * @type {Node}
 */
this.sourceNode = sourceNode;
/**
 * The components that should be updated.
 *
 * @type {String}
 */
this.components = components;
/**
 * The value node.
 *
 * @type {Node}
 */
this.targetNode = targetNode;
}
/**
 * This method is overwritten since the node type is inferred from {@link SetNode#sourceNode}.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The node type.
 */
getNodeType( builder ) {
return this.sourceNode.getNodeType( builder );
}
/**
 * Emits a constructor expression that copies the source's components, replacing
 * the targeted components with the target value.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The generated shader string.
 */
generate( builder ) {
const { sourceNode, components, targetNode } = this;
const sourceType = this.getNodeType( builder );
const componentType = builder.getComponentType( targetNode.getNodeType( builder ) );
const targetType = builder.getTypeFromLength( components.length, componentType );
const targetSnippet = targetNode.build( builder, targetType );
const sourceSnippet = sourceNode.build( builder, sourceType );
const length = builder.getTypeLength( sourceType );
const snippetValues = [];
for ( let i = 0; i < length; i ++ ) {
const component = vectorComponents[ i ];
if ( component === components[ 0 ] ) {
// Insert the target value once and skip over the replaced components
// (assumes `components` is a contiguous run starting at this position).
snippetValues.push( targetSnippet );
i += components.length - 1;
} else {
// Untouched component: copy it from the source.
snippetValues.push( sourceSnippet + '.' + component );
}
}
return `${ builder.getType( sourceType ) }( ${ snippetValues.join( ', ' ) } )`;
}
}
/**
 * This module is part of the TSL core and usually not used in app level code.
 * It represents a flip operation during the shader generation process
 * meaning it flips normalized values with the following formula:
 * ```
 * x = 1 - x;
 * ```
 * `FlipNode` is internally used to implement any `flipXYZW()`, `flipRGBA()` and
 * `flipSTPQ()` method invocations on node objects. For example:
 * ```js
 * uvNode = uvNode.flipY();
 * ```
 *
 * @augments TempNode
 */
class FlipNode extends TempNode {
static get type() {
return 'FlipNode';
}
/**
 * Constructs a new flip node.
 *
 * @param {Node} sourceNode - The node which component(s) should be flipped.
 * @param {String} components - The components that should be flipped e.g. `'x'` or `'xy'` (sorted into xyzw order by the caller).
 */
constructor( sourceNode, components ) {
super();
/**
 * The node which component(s) should be flipped.
 *
 * @type {Node}
 */
this.sourceNode = sourceNode;
/**
 * The components that should be flipped e.g. `'x'` or `'xy'`.
 *
 * @type {String}
 */
this.components = components;
}
/**
 * This method is overwritten since the node type is inferred from the source node.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The node type.
 */
getNodeType( builder ) {
return this.sourceNode.getNodeType( builder );
}
/**
 * Emits a constructor expression where the selected components are replaced
 * by `1.0 - component`. The source is cached in a variable first so it is
 * evaluated only once.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The generated shader string.
 */
generate( builder ) {
const { components, sourceNode } = this;
const sourceType = this.getNodeType( builder );
const sourceSnippet = sourceNode.build( builder );
// Cache the source expression in a temp variable to avoid re-evaluation per component.
const sourceCache = builder.getVarFromNode( this );
const sourceProperty = builder.getPropertyName( sourceCache );
builder.addLineFlowCode( sourceProperty + ' = ' + sourceSnippet, this );
const length = builder.getTypeLength( sourceType );
const snippetValues = [];
let componentIndex = 0;
// Walk the components in canonical order; flip the ones listed in `components`
// (assumes `components` is sorted into the same canonical order).
for ( let i = 0; i < length; i ++ ) {
const component = vectorComponents[ i ];
if ( component === components[ componentIndex ] ) {
snippetValues.push( '1.0 - ' + ( sourceProperty + '.' + component ) );
componentIndex ++;
} else {
snippetValues.push( sourceProperty + '.' + component );
}
}
return `${ builder.getType( sourceType ) }( ${ snippetValues.join( ', ' ) } )`;
}
}
/**
 * Base class for representing data input nodes.
 *
 * @augments Node
 */
class InputNode extends Node {
static get type() {
return 'InputNode';
}
/**
 * Constructs a new input node.
 *
 * @param {Any} value - The value of this node. This can be a any JS primitive, functions, array buffers or even three.js objects (vector, matrices, colors).
 * @param {String?} nodeType - The node type. If no explicit type is defined, the node tries to derive the type from its value.
 */
constructor( value, nodeType = null ) {
super( nodeType );
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isInputNode = true;
/**
 * The value of this node. This can be a any JS primitive, functions, array buffers or even three.js objects (vector, matrices, colors).
 *
 * @type {Any}
 */
this.value = value;
/**
 * The precision of the value in the shader.
 *
 * @type {('low'|'medium'|'high')?}
 * @default null
 */
this.precision = null;
}
/**
 * Returns the node type, deriving it from the current value when no
 * explicit type was set.
 *
 * @return {String} The node type.
 */
getNodeType( /*builder*/ ) {
if ( this.nodeType === null ) {
return getValueType( this.value );
}
return this.nodeType;
}
/**
 * Returns the input type of the node which is by default the node type. Derived modules
 * might overwrite this method and use a fixed type or compute one analytically.
 *
 * A typical example for different input and node types are textures. The input type of a
 * normal RGBA texture is `texture` whereas its node type is `vec4`.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The input type.
 */
getInputType( builder ) {
return this.getNodeType( builder );
}
/**
 * Sets the precision to the given value. The method can be
 * overwritten in derived classes if the final precision must be computed
 * analytically.
 *
 * @param {('low'|'medium'|'high')} precision - The precision of the input value in the shader.
 * @return {InputNode} A reference to this node.
 */
setPrecision( precision ) {
this.precision = precision;
return this;
}
serialize( data ) {
super.serialize( data );
data.value = this.value;
// Objects with toArray (vectors, matrices, colors) are flattened to plain arrays.
if ( this.value && this.value.toArray ) data.value = this.value.toArray();
data.valueType = getValueType( this.value );
data.nodeType = this.nodeType;
// ArrayBuffers cannot be stored as JSON directly — encode as base64.
if ( data.valueType === 'ArrayBuffer' ) data.value = arrayBufferToBase64( data.value );
data.precision = this.precision;
}
deserialize( data ) {
super.deserialize( data );
this.nodeType = data.nodeType;
// Array values are rebuilt into their original object type from the stored valueType.
this.value = Array.isArray( data.value ) ? getValueFromType( data.valueType, ...data.value ) : data.value;
this.precision = data.precision || null;
// If the rebuilt value supports fromArray, fill it from the serialized array.
if ( this.value && this.value.fromArray ) this.value = this.value.fromArray( data.value );
}
/**
 * Abstract; derived input node classes must implement shader generation.
 */
generate( /*builder, output*/ ) {
console.warn( 'Abstract function.' );
}
}
// Matches scalar numeric shader type names: 'float', 'int' and 'uint' (unanchored substring test).
const _regNum = /float|u?int/;
/**
 * Class for representing a constant value in the shader.
 *
 * @augments InputNode
 */
class ConstNode extends InputNode {

	static get type() {

		return 'ConstNode';

	}

	/**
	 * Constructs a new const node.
	 *
	 * @param {Any} value - The value of this node. Usually a JS primitive or three.js object (vector, matrix, color).
	 * @param {String?} nodeType - The node type. If no explicit type is defined, the node tries to derive the type from its value.
	 */
	constructor( value, nodeType = null ) {

		super( value, nodeType );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isConstNode = true;

	}

	/**
	 * Generates the shader string of the value with the current node builder.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The generated value as a shader string.
	 */
	generateConst( builder ) {

		return builder.generateConst( this.getNodeType( builder ), this.value );

	}

	/**
	 * Emits the constant. When both the node type and the requested output are
	 * scalar numeric types, the constant is generated directly in the output
	 * type to avoid a redundant cast.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {String?} output - Can be used to define the output type.
	 * @return {String} The generated shader string.
	 */
	generate( builder, output ) {

		const type = this.getNodeType( builder );

		const bothScalarNumeric = _regNum.test( type ) && _regNum.test( output );

		if ( bothScalarNumeric ) return builder.generateConst( output, this.value );

		return builder.format( this.generateConst( builder ), type, output );

	}

}
/**
 * Base class for representing member access on an object-like
 * node data structures.
 *
 * @augments Node
 */
class MemberNode extends Node {

	static get type() {

		return 'MemberNode';

	}

	/**
	 * Constructs a member node.
	 *
	 * @param {Node} node - The object-like node.
	 * @param {String} property - The property name.
	 */
	constructor( node, property ) {

		super();

		/**
		 * The object-like node.
		 *
		 * @type {Node}
		 */
		this.node = node;

		/**
		 * The property name.
		 *
		 * @type {Node}
		 */
		this.property = property;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isMemberNode = true;

	}

	/**
	 * This method is overwritten since the node type is the member's type
	 * as reported by the accessed node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		return this.node.getMemberType( builder, this.property );

	}

	/**
	 * Emits the member access expression `object.property`.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The generated shader string.
	 */
	generate( builder ) {

		const objectSnippet = this.node.build( builder );

		return `${ objectSnippet }.${ this.property }`;

	}

}
/** @module TSLCore **/
// Stack node currently collecting assignments (assigned by the TSL runtime).
let currentStack = null;
// Registry of chainable TSL methods, keyed by method name.
const NodeElements = new Map();
/**
 * Registers a function under `name` so it becomes available for method
 * chaining on node objects. A redefinition is ignored with a warning;
 * non-function values are rejected with an error.
 *
 * @param {String} name - The chaining method name.
 * @param {Function} nodeElement - The function to register.
 */
function addMethodChaining( name, nodeElement ) {

	const alreadyRegistered = NodeElements.has( name );

	if ( alreadyRegistered ) {

		console.warn( `Redefinition of method chaining ${ name }` );
		return;

	}

	if ( typeof nodeElement !== 'function' ) throw new Error( `Node element ${ name } is not a function` );

	NodeElements.set( name, nodeElement );

}
// Maps the rgba/stpq component aliases onto their canonical xyzw names.
const _swizzleXYZW = { r: 'x', g: 'y', b: 'z', a: 'w', s: 'x', t: 'y', p: 'z', q: 'w' };
// Normalizes a swizzle string to xyzw notation, e.g. 'rgb' -> 'xyz'.
const parseSwizzle = ( props ) => props.replace( /[rgbastpq]/g, ( c ) => _swizzleXYZW[ c ] );
// Normalizes a swizzle string and sorts its characters into xyzw order, e.g. 'bgr' -> 'xyz'.
const parseSwizzleAndSort = ( props ) => [ ...parseSwizzle( props ) ].sort().join( '' );
/**
 * Proxy handler implementing the dynamic part of the TSL syntax on node objects:
 * swizzle access ( `.xyz` ), registered method chaining ( `.mul()` ), compound
 * assignment ( `.mulAssign()` ), stack-aware `.assign()` and numeric element access.
 */
const shaderNodeHandler = {
	// Converts the first parameter (the inputs object) to node objects before
	// invoking the node closure.
	setup( NodeClosure, params ) {
		const inputs = params.shift();
		return NodeClosure( nodeObjects( inputs ), ...params );
	},
	get( node, prop, nodeObj ) {
		// only intercept string properties the node itself does not define
		if ( typeof prop === 'string' && node[ prop ] === undefined ) {
			if ( node.isStackNode !== true && prop === 'assign' ) {
				// `.assign()` on a non-stack node is routed through the active stack
				return ( ...params ) => {
					currentStack.assign( nodeObj, ...params );
					return nodeObj;
				};
			} else if ( NodeElements.has( prop ) ) {
				// registered method chaining, e.g. `.mul()`, `.toVec3()`;
				// on stacks the result is appended to the stack instead
				const nodeElement = NodeElements.get( prop );
				return node.isStackNode ? ( ...params ) => nodeObj.add( nodeElement( ...params ) ) : ( ...params ) => nodeElement( nodeObj, ...params );
			} else if ( prop === 'self' ) {
				// escape hatch to the unproxied node
				return node;
			} else if ( prop.endsWith( 'Assign' ) && NodeElements.has( prop.slice( 0, prop.length - 'Assign'.length ) ) ) {
				// compound assignment, e.g. `.mulAssign( x )` behaves like `a = a * x`
				const nodeElement = NodeElements.get( prop.slice( 0, prop.length - 'Assign'.length ) );
				return node.isStackNode ? ( ...params ) => nodeObj.assign( params[ 0 ], nodeElement( ...params ) ) : ( ...params ) => nodeObj.assign( nodeElement( nodeObj, ...params ) );
			} else if ( /^[xyzwrgbastpq]{1,4}$/.test( prop ) === true ) {
				// accessing properties ( swizzle )
				prop = parseSwizzle( prop );
				return nodeObject( new SplitNode( nodeObj, prop ) );
			} else if ( /^set[XYZWRGBASTPQ]{1,4}$/.test( prop ) === true ) {
				// set properties ( swizzle ) and sort to xyzw sequence
				prop = parseSwizzleAndSort( prop.slice( 3 ).toLowerCase() );
				return ( value ) => nodeObject( new SetNode( node, prop, value ) );
			} else if ( /^flip[XYZWRGBASTPQ]{1,4}$/.test( prop ) === true ) {
				// flip properties ( swizzle ) and sort to xyzw sequence
				prop = parseSwizzleAndSort( prop.slice( 4 ).toLowerCase() );
				return () => nodeObject( new FlipNode( nodeObject( node ), prop ) );
			} else if ( prop === 'width' || prop === 'height' || prop === 'depth' ) {
				// accessing property; dimension names alias the first three components
				if ( prop === 'width' ) prop = 'x';
				else if ( prop === 'height' ) prop = 'y';
				else if ( prop === 'depth' ) prop = 'z';
				return nodeObject( new SplitNode( node, prop ) );
			} else if ( /^\d+$/.test( prop ) === true ) {
				// accessing array
				return nodeObject( new ArrayElementNode( nodeObj, new ConstNode( Number( prop ), 'uint' ) ) );
			} else if ( /^get$/.test( prop ) === true ) {
				// accessing members by name, e.g. `.get( 'position' )`
				return ( value ) => nodeObject( new MemberNode( nodeObj, value ) );
			}
		}
		return Reflect.get( node, prop, nodeObj );
	},
	set( node, prop, value, nodeObj ) {
		if ( typeof prop === 'string' && node[ prop ] === undefined ) {
			// setting properties: swizzles, dimension aliases and indices assign in place
			if ( /^[xyzwrgbastpq]{1,4}$/.test( prop ) === true || prop === 'width' || prop === 'height' || prop === 'depth' || /^\d+$/.test( prop ) === true ) {
				nodeObj[ prop ].assign( value );
				return true;
			}
		}
		return Reflect.set( node, prop, value, nodeObj );
	}
};
// Cache so the same node always maps to the same proxy object.
const nodeObjectsCacheMap = new WeakMap();
// Per-NodeBuilder-class cache of compiled function nodes for layouted shader nodes.
const nodeBuilderFunctionsCacheMap = new WeakMap();
/**
 * Wraps the given value into a TSL node object.
 *
 * - Nodes are wrapped into a ( cached ) Proxy using `shaderNodeHandler`.
 * - Primitive values become const nodes via `getConstNode()`.
 * - Functions ( type 'shader' ) are converted via `Fn()`.
 * - Everything else ( e.g. strings ) is returned unchanged.
 *
 * @param {Any} obj - The value to wrap.
 * @param {String?} [altType=null] - Optional explicit type for primitive values.
 * @returns {Any} The node object or the original value.
 */
const ShaderNodeObject = function ( obj, altType = null ) {
	const type = getValueType( obj );
	if ( type === 'node' ) {
		let nodeObject = nodeObjectsCacheMap.get( obj );
		if ( nodeObject === undefined ) {
			nodeObject = new Proxy( obj, shaderNodeHandler );
			nodeObjectsCacheMap.set( obj, nodeObject );
			// the proxy maps to itself so wrapping is idempotent
			nodeObjectsCacheMap.set( nodeObject, nodeObject );
		}
		return nodeObject;
	} else if ( ( altType === null && ( type === 'float' || type === 'boolean' ) ) || ( type && type !== 'shader' && type !== 'string' ) ) {
		return nodeObject( getConstNode( obj, altType ) );
	} else if ( type === 'shader' ) {
		return Fn( obj );
	}
	return obj;
};
/**
 * Converts every property of the given dictionary into a node object, in place.
 *
 * Kept as a `function` expression (not an arrow) because callers invoke it with `new`.
 *
 * @param {Object} objects - The dictionary of values to convert.
 * @param {String?} [altType=null] - Optional fallback type for primitive values.
 * @returns {Object} The same dictionary with converted values.
 */
const ShaderNodeObjects = function ( objects, altType = null ) {
	// for...in is intentional: it matches the original enumeration semantics
	for ( const key in objects ) {
		objects[ key ] = nodeObject( objects[ key ], altType );
	}
	return objects;
};
/**
 * Converts every element of the given array into a node object, in place.
 *
 * Kept as a `function` expression (not an arrow) because callers invoke it with `new`.
 *
 * @param {Array} array - The array of values to convert.
 * @param {String?} [altType=null] - Optional fallback type for primitive values.
 * @returns {Array} The same array with converted elements.
 */
const ShaderNodeArray = function ( array, altType = null ) {
	for ( let index = 0, length = array.length; index < length; index ++ ) {
		array[ index ] = nodeObject( array[ index ], altType );
	}
	return array;
};
/**
 * Creates a factory function for the given node class.
 *
 * @param {Function} NodeClass - The node class to instantiate.
 * @param {String?} [scope=null] - Optional scope passed as first constructor argument.
 * @param {Any?} [factor=null] - Optional trailing constructor argument, converted to a node once.
 * @param {Object?} [settings=null] - Optional properties assigned onto each created node.
 * @returns {Function} The factory.
 */
const ShaderNodeProxy = function ( NodeClass, scope = null, factor = null, settings = null ) {
	// applies the optional settings and wraps the node into a node object
	const finalize = ( node ) => nodeObject( settings !== null ? Object.assign( node, settings ) : node );
	if ( scope === null ) {
		return ( ...params ) => finalize( new NodeClass( ...nodeArray( params ) ) );
	}
	if ( factor !== null ) {
		// the factor is converted a single time, at factory creation
		factor = nodeObject( factor );
		return ( ...params ) => finalize( new NodeClass( scope, ...nodeArray( params ), factor ) );
	}
	return ( ...params ) => finalize( new NodeClass( scope, ...nodeArray( params ) ) );
};
/**
 * Instantiates the given node class once with the given parameters
 * and returns the result as a node object.
 *
 * @param {Function} NodeClass - The node class to instantiate.
 * @param {...Any} params - The constructor parameters, converted to nodes.
 * @returns {Object} The node object.
 */
const ShaderNodeImmutable = function ( NodeClass, ...params ) {
	const node = new NodeClass( ...nodeArray( params ) );
	return nodeObject( node );
};
/**
 * Internal node representing one invocation of a TSL function created with `Fn()`.
 * Builds ( and caches ) the function's output node on demand.
 *
 * @augments Node
 */
class ShaderCallNodeInternal extends Node {
	constructor( shaderNode, inputNodes ) {
		super();
		// the shader node being called
		this.shaderNode = shaderNode;
		// the call arguments ( array, named-inputs object or null )
		this.inputNodes = inputNodes;
	}
	getNodeType( builder ) {
		// an explicit nodeType on the shader node wins over the inferred output type
		return this.shaderNode.nodeType || this.getOutputNode( builder ).getNodeType( builder );
	}
	getMemberType( builder, name ) {
		return this.getOutputNode( builder ).getMemberType( builder, name );
	}
	// Executes the shader node, either as a compiled function call ( when a
	// layout is set ) or by running its JS closure directly.
	call( builder ) {
		const { shaderNode, inputNodes } = this;
		const properties = builder.getNodeProperties( shaderNode );
		// results of `once` shader nodes are reused across calls
		if ( properties.onceOutput ) return properties.onceOutput;
		//
		let result = null;
		if ( shaderNode.layout ) {
			// compile the shader node into a function node once per builder class
			let functionNodesCacheMap = nodeBuilderFunctionsCacheMap.get( builder.constructor );
			if ( functionNodesCacheMap === undefined ) {
				functionNodesCacheMap = new WeakMap();
				nodeBuilderFunctionsCacheMap.set( builder.constructor, functionNodesCacheMap );
			}
			let functionNode = functionNodesCacheMap.get( shaderNode );
			if ( functionNode === undefined ) {
				functionNode = nodeObject( builder.buildFunctionNode( shaderNode ) );
				functionNodesCacheMap.set( shaderNode, functionNode );
			}
			// register the function as an include of the enclosing function, if any
			if ( builder.currentFunctionNode !== null ) {
				builder.currentFunctionNode.includes.push( functionNode );
			}
			result = nodeObject( functionNode.call( inputNodes ) );
		} else {
			// run the JS closure; the builder is passed as second argument when
			// inputs exist or the closure declares more than one parameter
			const jsFunc = shaderNode.jsFunc;
			const outputNode = inputNodes !== null || jsFunc.length > 1 ? jsFunc( inputNodes || [], builder ) : jsFunc( builder );
			result = nodeObject( outputNode );
		}
		if ( shaderNode.once ) {
			properties.onceOutput = result;
		}
		return result;
	}
	// Returns the ( lazily created ) output node of this call.
	getOutputNode( builder ) {
		const properties = builder.getNodeProperties( this );
		if ( properties.outputNode === null ) {
			properties.outputNode = this.setupOutput( builder );
		}
		return properties.outputNode;
	}
	setup( builder ) {
		return this.getOutputNode( builder );
	}
	// Builds the output node inside a dedicated stack.
	setupOutput( builder ) {
		builder.addStack();
		builder.stack.outputNode = this.call( builder );
		return builder.removeStack();
	}
	generate( builder, output ) {
		const outputNode = this.getOutputNode( builder );
		return outputNode.build( builder, output );
	}
}
/**
 * Internal representation of a TSL function created via `Fn()`.
 *
 * @augments Node
 */
class ShaderNodeInternal extends Node {
	/**
	 * @param {Function} jsFunc - The JS closure that builds the node graph.
	 * @param {String?} nodeType - Optional explicit output type.
	 */
	constructor( jsFunc, nodeType ) {
		super( nodeType );
		// the JS closure that builds the node graph
		this.jsFunc = jsFunc;
		// optional layout used to compile this function as a standalone shader function
		this.layout = null;
		this.global = true;
		// when true, the function's output is computed only once and reused
		this.once = false;
	}
	/**
	 * Sets the layout of this shader node.
	 *
	 * @param {Object} layout - The layout to set.
	 * @return {ShaderNodeInternal} A reference to this node.
	 */
	setLayout( layout ) {
		this.layout = layout;
		return this;
	}
	/**
	 * Calls this shader node with the given inputs.
	 *
	 * @param {Object?} [inputs=null] - The call inputs, converted to node objects in place.
	 * @returns {Object} The call node object.
	 */
	call( inputs = null ) {
		nodeObjects( inputs );
		return nodeObject( new ShaderCallNodeInternal( this, inputs ) );
	}
	setup() {
		return this.call();
	}
}
// Commonly used constant values; pre-wrapped in ConstNodes so the type
// constructors ( bool(), int(), float(), ... ) can reuse a single node instance.
const bools = [ false, true ];
const uints = [ 0, 1, 2, 3 ];
const ints = [ - 1, - 2 ];
const floats = [ 0.5, 1.5, 1 / 3, 1e-6, 1e6, Math.PI, Math.PI * 2, 1 / Math.PI, 2 / Math.PI, 1 / ( Math.PI * 2 ), Math.PI / 2 ];
const boolsCacheMap = new Map();
for ( const bool of bools ) boolsCacheMap.set( bool, new ConstNode( bool ) );
const uintsCacheMap = new Map();
for ( const uint of uints ) uintsCacheMap.set( uint, new ConstNode( uint, 'uint' ) );
// Bug fix: the spread of a Map yields [ key, node ] entries. The previous code
// mapped each entry to a bare ConstNode, so the Map constructor read `node[ 0 ]`
// and `node[ 1 ]` ( both undefined ) and the derived caches only contained a
// spurious `undefined -> undefined` entry. Map entries to proper pairs instead.
const intsCacheMap = new Map( [ ...uintsCacheMap ].map( ( [ key, node ] ) => [ key, new ConstNode( node.value, 'int' ) ] ) );
for ( const int of ints ) intsCacheMap.set( int, new ConstNode( int, 'int' ) );
const floatsCacheMap = new Map( [ ...intsCacheMap ].map( ( [ key, node ] ) => [ key, new ConstNode( node.value ) ] ) );
for ( const float of floats ) floatsCacheMap.set( float, new ConstNode( float ) );
for ( const float of floats ) floatsCacheMap.set( - float, new ConstNode( - float ) );
const cacheMaps = { bool: boolsCacheMap, uint: uintsCacheMap, ints: intsCacheMap, float: floatsCacheMap };
const constNodesCacheMap = new Map( [ ...boolsCacheMap, ...floatsCacheMap ] );
/**
 * Returns a ConstNode for the given value: a cached node for common
 * primitive values, the value itself when it already is a node, or a
 * freshly created ConstNode otherwise.
 *
 * @param {Any} value - The value to wrap.
 * @param {String?} type - The optional node type.
 * @returns {Node}
 */
const getConstNode = ( value, type ) => {
	if ( constNodesCacheMap.has( value ) ) {
		return constNodesCacheMap.get( value );
	}
	if ( value.isNode === true ) {
		return value;
	}
	return new ConstNode( value, type );
};
/**
 * Returns the node type of the given node, or `undefined` when the
 * type cannot be resolved without a builder ( i.e. `getNodeType()` throws ).
 *
 * @param {Node} node - The node to query.
 * @returns {String|undefined} The node type or `undefined`.
 */
const safeGetNodeType = ( node ) => {
	let nodeType;
	try {
		nodeType = node.getNodeType();
	} catch ( _ ) {
		nodeType = undefined;
	}
	return nodeType;
};
/**
 * Factory for the TSL type constructor functions such as `float()`, `vec3()` or `mat4()`.
 *
 * @param {String} type - The node type the constructor produces.
 * @param {Map?} [cacheMap=null] - Optional cache of ConstNodes for common values.
 * @returns {Function} The type constructor.
 */
const ConvertType = function ( type, cacheMap = null ) {
	return ( ...params ) => {
		// collapse primitive arguments of non-scalar types into a single value object
		if ( params.length === 0 || ( ! [ 'bool', 'float', 'int', 'uint' ].includes( type ) && params.every( param => typeof param !== 'object' ) ) ) {
			params = [ getValueFromType( type, ...params ) ];
		}
		// reuse cached ConstNodes for common values
		if ( params.length === 1 && cacheMap !== null && cacheMap.has( params[ 0 ] ) ) {
			return nodeObject( cacheMap.get( params[ 0 ] ) );
		}
		if ( params.length === 1 ) {
			const node = getConstNode( params[ 0 ], type );
			// skip the conversion when the node already has the requested type
			if ( safeGetNodeType( node ) === type ) return nodeObject( node );
			return nodeObject( new ConvertNode( node, type ) );
		}
		// multiple parameters are joined into a value of the target type
		const nodes = params.map( param => getConstNode( param ) );
		return nodeObject( new JoinNode( nodes, type ) );
	};
};
// exports
// Unwraps `.value` from object-like arguments, passes primitives through.
// TODO: remove boolean conversion and defined function
const defined = ( v ) => {
	const isObjectLike = typeof v === 'object' && v !== null;
	return isObjectLike ? v.value : v;
};
// utils
// Resolves a node type from a value: its `nodeType`/`convertTo` when present,
// the value itself when it is a type string, `null` otherwise.
const getConstNodeType = ( value ) => {
	if ( value === undefined || value === null ) return null;
	return value.nodeType || value.convertTo || ( typeof value === 'string' ? value : null );
};
// shader node base
/**
 * Creates a shader node: an internal node wrapped into the TSL proxy
 * so that chaining and swizzle syntax work on it.
 *
 * @param {Function} jsFunc - The JS closure that builds the node graph.
 * @param {String?} nodeType - Optional explicit output type.
 * @returns {Proxy} The proxied shader node.
 */
function ShaderNode( jsFunc, nodeType ) {
	const internal = new ShaderNodeInternal( jsFunc, nodeType );
	return new Proxy( internal, shaderNodeHandler );
}
// Exported node-object helpers. The wrapped factories are plain functions, so
// calling them with `new` only worked via the constructor-return rules; the
// superfluous `new` is removed where it is provably inert.
const nodeObject = ( val, altType = null ) => ShaderNodeObject( val, altType );
// NOTE: `new` is kept here on purpose — when `val` is null/undefined the
// constructor-return rules substitute a fresh empty object, which keeps
// destructuring of the inputs object in shader closures working.
const nodeObjects = ( val, altType = null ) => new ShaderNodeObjects( val, altType );
const nodeArray = ( val, altType = null ) => ShaderNodeArray( val, altType );
const nodeProxy = ( ...params ) => ShaderNodeProxy( ...params );
const nodeImmutable = ( ...params ) => ShaderNodeImmutable( ...params );
/**
 * Creates a callable TSL function from the given JS closure.
 * The returned function supports `.setLayout()` and `.once()` chaining.
 *
 * @param {Function} jsFunc - The JS closure that builds the node graph.
 * @param {String?} nodeType - Optional explicit output type.
 * @returns {Function} The TSL function.
 */
const Fn = ( jsFunc, nodeType ) => {
	const shaderNode = new ShaderNode( jsFunc, nodeType );
	const fn = ( ...params ) => {
		nodeObjects( params );
		// a leading node argument means positional inputs; otherwise the
		// single first parameter is treated as a named-inputs object
		const inputs = ( params[ 0 ] && params[ 0 ].isNode ) ? [ ...params ] : params[ 0 ];
		return shaderNode.call( inputs );
	};
	fn.shaderNode = shaderNode;
	// sets the layout used to compile this function as a standalone shader function
	fn.setLayout = ( layout ) => {
		shaderNode.setLayout( layout );
		return fn;
	};
	// marks the function so its output is computed only once and reused
	fn.once = () => {
		shaderNode.once = true;
		return fn;
	};
	return fn;
};
/**
 * Deprecated alias of {@link Fn}; logs a warning and forwards all parameters.
 *
 * @function
 * @deprecated since r168. Use {@link Fn} instead.
 *
 * @param {...any} params
 * @returns {Function}
 */
const tslFn = ( ...params ) => { // @deprecated, r168
	console.warn( 'TSL.ShaderNode: tslFn() has been renamed to Fn().' );
	return Fn( ...params );
};
// registers `.toGlobal()` chaining: marks a node as global
addMethodChaining( 'toGlobal', ( node ) => {
	node.global = true;
	return node;
} );
/**
 * Sets the stack that `If()`, `append()` and `.assign()` operate on.
 *
 * @param {Object} stack - The stack to make current.
 */
const setCurrentStack = ( stack ) => {
	currentStack = stack;
};
/**
 * Returns the currently active stack.
 *
 * @returns {Object} The current stack.
 */
const getCurrentStack = () => currentStack;
// forwards an If-statement to the current stack
const If = ( ...params ) => currentStack.If( ...params );
/**
 * Appends the given node to the current stack, if one is active.
 *
 * @param {Node} node - The node to append.
 * @returns {Node} The given node.
 */
function append( node ) {
	if ( currentStack !== null ) currentStack.add( node );
	return node;
}
addMethodChaining( 'append', append );
// types
// TSL type constructor functions; scalar types reuse cached ConstNodes for common values
const color = new ConvertType( 'color' );
const float = new ConvertType( 'float', cacheMaps.float );
const int = new ConvertType( 'int', cacheMaps.ints );
const uint = new ConvertType( 'uint', cacheMaps.uint );
const bool = new ConvertType( 'bool', cacheMaps.bool );
const vec2 = new ConvertType( 'vec2' );
const ivec2 = new ConvertType( 'ivec2' );
const uvec2 = new ConvertType( 'uvec2' );
const bvec2 = new ConvertType( 'bvec2' );
const vec3 = new ConvertType( 'vec3' );
const ivec3 = new ConvertType( 'ivec3' );
const uvec3 = new ConvertType( 'uvec3' );
const bvec3 = new ConvertType( 'bvec3' );
const vec4 = new ConvertType( 'vec4' );
const ivec4 = new ConvertType( 'ivec4' );
const uvec4 = new ConvertType( 'uvec4' );
const bvec4 = new ConvertType( 'bvec4' );
const mat2 = new ConvertType( 'mat2' );
const mat3 = new ConvertType( 'mat3' );
const mat4 = new ConvertType( 'mat4' );
// non-shader constant wrappers
const string = ( value = '' ) => nodeObject( new ConstNode( value, 'string' ) );
const arrayBuffer = ( value ) => nodeObject( new ConstNode( value, 'ArrayBuffer' ) );
// conversion method chaining, e.g. `node.toVec3()`
addMethodChaining( 'toColor', color );
addMethodChaining( 'toFloat', float );
addMethodChaining( 'toInt', int );
addMethodChaining( 'toUint', uint );
addMethodChaining( 'toBool', bool );
addMethodChaining( 'toVec2', vec2 );
addMethodChaining( 'toIVec2', ivec2 );
addMethodChaining( 'toUVec2', uvec2 );
addMethodChaining( 'toBVec2', bvec2 );
addMethodChaining( 'toVec3', vec3 );
addMethodChaining( 'toIVec3', ivec3 );
addMethodChaining( 'toUVec3', uvec3 );
addMethodChaining( 'toBVec3', bvec3 );
addMethodChaining( 'toVec4', vec4 );
addMethodChaining( 'toIVec4', ivec4 );
addMethodChaining( 'toUVec4', uvec4 );
addMethodChaining( 'toBVec4', bvec4 );
addMethodChaining( 'toMat2', mat2 );
addMethodChaining( 'toMat3', mat3 );
addMethodChaining( 'toMat4', mat4 );
// basic nodes
const element = /*@__PURE__*/ nodeProxy( ArrayElementNode );
const convert = ( node, types ) => nodeObject( new ConvertNode( nodeObject( node ), types ) );
const split = ( node, channels ) => nodeObject( new SplitNode( nodeObject( node ), channels ) );
addMethodChaining( 'element', element );
addMethodChaining( 'convert', convert );
/** @module ArrayNode **/
/**
 * ArrayNode represents a collection of nodes, typically created using the {@link module:TSL~array} function.
 * ```js
 * const colors = array( [
 * 	vec3( 1, 0, 0 ),
 * 	vec3( 0, 1, 0 ),
 * 	vec3( 0, 0, 1 )
 * ] );
 *
 * const redColor = colors.element( 0 );
 * ```
 *
 * @augments TempNode
 */
class ArrayNode extends TempNode {
	static get type() {
		return 'ArrayNode';
	}
	/**
	 * Constructs a new array node.
	 *
	 * @param {String?} [nodeType] - The data type of the elements; inferred from the first value when `null`.
	 * @param {Number} [count] - Size of the array.
	 * @param {Array<Node>?} [values=null] - Array default values.
	 */
	constructor( nodeType, count, values = null ) {
		super( nodeType );
		/**
		 * Array size.
		 *
		 * @type {Number}
		 */
		this.count = count;
		/**
		 * Array default values.
		 *
		 * @type {Array<Node>?}
		 */
		this.values = values;
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isArrayNode = true;
	}
	getNodeType( builder ) {
		// lazily infer the element type from the first default value
		if ( this.nodeType === null ) {
			this.nodeType = this.values[ 0 ].getNodeType( builder );
		}
		return this.nodeType;
	}
	getElementType( builder ) {
		return this.getNodeType( builder );
	}
	generate( builder ) {
		const type = this.getNodeType( builder );
		return builder.generateArray( type, this.count, this.values );
	}
}
/**
 * TSL function for creating an array node.
 *
 * @function
 * @param {String|Array<Node>} nodeTypeOrValues - A string representing the element type (e.g., 'vec3')
 * or an array containing the default values (e.g., [ vec3() ]).
 * @param {Number?} [count] - Size of the array.
 * @returns {ArrayNode}
 */
const array = ( ...params ) => {
	let node;
	if ( params.length === 1 ) {
		// array( values ) - element type and size are derived from the values
		const values = params[ 0 ];
		node = new ArrayNode( null, values.length, values );
	} else {
		// array( nodeType, count )
		const [ nodeType, count ] = params;
		node = new ArrayNode( nodeType, count );
	}
	return nodeObject( node );
};
// `.toArray( count )` repeats the node `count` times as array default values
addMethodChaining( 'toArray', ( node, count ) => array( new Array( count ).fill( node ) ) );
/** @module UniformGroupNode **/
/**
 * This node can be used to group single instances of {@link UniformNode}
 * and manage them as a uniform buffer.
 *
 * In most cases, the predefined nodes `objectGroup`, `renderGroup` and `frameGroup`
 * will be used when defining the {@link UniformNode#groupNode} property.
 *
 * - `objectGroup`: Uniform buffer per object.
 * - `renderGroup`: Shared uniform buffer, updated once per render call.
 * - `frameGroup`: Shared uniform buffer, updated once per frame.
 *
 * @augments Node
 */
class UniformGroupNode extends Node {
	static get type() {
		return 'UniformGroupNode';
	}
	/**
	 * Constructs a new uniform group node.
	 *
	 * @param {String} name - The name of the uniform group node.
	 * @param {Boolean} [shared=false] - Whether this uniform group node is shared or not.
	 * @param {Number} [order=1] - Influences the internal sorting.
	 */
	constructor( name, shared = false, order = 1 ) {
		super( 'string' );
		/**
		 * The name of the uniform group node.
		 *
		 * @type {String}
		 */
		this.name = name;
		/**
		 * Whether this uniform group node is shared or not.
		 *
		 * @type {Boolean}
		 * @default false
		 */
		this.shared = shared;
		/**
		 * Influences the internal sorting.
		 * TODO: Add details when this property should be changed.
		 *
		 * @type {Number}
		 * @default 1
		 */
		this.order = order;
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isUniformGroup = true;
	}
	serialize( data ) {
		super.serialize( data );
		Object.assign( data, {
			name: this.name,
			version: this.version,
			shared: this.shared
		} );
	}
	deserialize( data ) {
		super.deserialize( data );
		this.name = data.name;
		this.version = data.version;
		this.shared = data.shared;
	}
}
/**
 * TSL function for creating a non-shared uniform group node with the given name.
 *
 * @function
 * @param {String} name - The name of the uniform group node.
 * @returns {UniformGroupNode}
 */
const uniformGroup = ( name ) => new UniformGroupNode( name );
/**
 * TSL function for creating a shared uniform group node with the given name and order.
 *
 * @function
 * @param {String} name - The name of the uniform group node.
 * @param {Number} [order=0] - Influences the internal sorting.
 * @returns {UniformGroupNode}
 */
const sharedUniformGroup = ( name, order = 0 ) => new UniformGroupNode( name, true, order );
/**
 * TSL object that represents a shared uniform group node which is updated once per frame.
 *
 * @type {UniformGroupNode}
 */
const frameGroup = /*@__PURE__*/ sharedUniformGroup( 'frame' );
/**
 * TSL object that represents a shared uniform group node which is updated once per render.
 *
 * @type {UniformGroupNode}
 */
const renderGroup = /*@__PURE__*/ sharedUniformGroup( 'render' );
/**
 * TSL object that represents a uniform group node which is updated once per object.
 * This is the default group of {@link UniformNode}.
 *
 * @type {UniformGroupNode}
 */
const objectGroup = /*@__PURE__*/ uniformGroup( 'object' );
/** @module UniformNode **/
/**
 * Class for representing a uniform.
 *
 * @augments InputNode
 */
class UniformNode extends InputNode {
	static get type() {
		return 'UniformNode';
	}
	/**
	 * Constructs a new uniform node.
	 *
	 * @param {Any} value - The value of this node. Usually a JS primitive or three.js object (vector, matrix, color, texture).
	 * @param {String?} nodeType - The node type. If no explicit type is defined, the node tries to derive the type from its value.
	 */
	constructor( value, nodeType = null ) {
		super( value, nodeType );
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isUniformNode = true;
		/**
		 * The name or label of the uniform.
		 *
		 * @type {String}
		 * @default ''
		 */
		this.name = '';
		/**
		 * The uniform group of this uniform. By default, uniforms are
		 * managed per object but they might belong to a shared group
		 * which is updated per frame or render call.
		 *
		 * @type {UniformGroupNode}
		 */
		this.groupNode = objectGroup;
	}
	/**
	 * Sets the {@link UniformNode#name} property.
	 *
	 * @param {String} name - The name of the uniform.
	 * @return {UniformNode} A reference to this node.
	 */
	label( name ) {
		this.name = name;
		return this;
	}
	/**
	 * Sets the {@link UniformNode#groupNode} property.
	 *
	 * @param {UniformGroupNode} group - The uniform group.
	 * @return {UniformNode} A reference to this node.
	 */
	setGroup( group ) {
		this.groupNode = group;
		return this;
	}
	/**
	 * Returns the {@link UniformNode#groupNode}.
	 *
	 * @return {UniformGroupNode} The uniform group.
	 */
	getGroup() {
		return this.groupNode;
	}
	/**
	 * By default, this method returns the result of {@link Node#getHash} but derived
	 * classes might overwrite this method with a different implementation.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The uniform hash.
	 */
	getUniformHash( builder ) {
		return this.getHash( builder );
	}
	/**
	 * Registers an update callback bound to the node object. A value returned
	 * from the callback is assigned to {@link UniformNode#value}.
	 *
	 * @param {Function} callback - The update callback; receives the frame and the node object.
	 * @param {String} updateType - When the callback should run.
	 * @return {Node} Whatever the base class `onUpdate()` returns (typically this node).
	 */
	onUpdate( callback, updateType ) {
		const self = this.getSelf();
		callback = callback.bind( self );
		return super.onUpdate( ( frame ) => {
			const value = callback( frame, self );
			if ( value !== undefined ) {
				this.value = value;
			}
		}, updateType );
	}
	generate( builder, output ) {
		const type = this.getNodeType( builder );
		const hash = this.getUniformHash( builder );
		// reuse an already registered uniform node with the same hash, if any
		let sharedNode = builder.getNodeFromHash( hash );
		if ( sharedNode === undefined ) {
			builder.setHashNode( this, hash );
			sharedNode = this;
		}
		const sharedNodeType = sharedNode.getInputType( builder );
		const nodeUniform = builder.getUniformFromNode( sharedNode, sharedNodeType, builder.shaderStage, this.name || builder.context.label );
		const propertyName = builder.getPropertyName( nodeUniform );
		// the context label is only valid for a single uniform, so consume it
		if ( builder.context.label !== undefined ) delete builder.context.label;
		return builder.format( propertyName, type, output );
	}
}
/**
 * TSL function for creating a uniform node.
 *
 * @function
 * @param {Any} arg1 - The value of this node. Usually a JS primitive or three.js object (vector, matrix, color, texture).
 * @param {String?} arg2 - The node type. If no explicit type is defined, the node tries to derive the type from its value.
 * @returns {UniformNode}
 */
const uniform = ( arg1, arg2 ) => {
	const nodeType = getConstNodeType( arg2 || arg1 );
	// @TODO: get ConstNode from .traverse() in the future
	let value = arg1;
	if ( arg1 && arg1.isNode === true ) {
		// unwrap nodes so the uniform stores the raw value
		value = ( arg1.node && arg1.node.value ) || arg1.value;
	}
	return nodeObject( new UniformNode( value, nodeType ) );
};
/** @module PropertyNode **/
/**
 * This class represents a shader property. It can be used
 * to explicitly define a property and assign a value to it.
 *
 * ```js
 * const threshold = property( 'float', 'threshold' ).assign( THRESHOLD );
 *```
 * `PropertyNode` is used by the engine to predefine common material properties
 * for TSL code.
 *
 * @augments Node
 */
class PropertyNode extends Node {
	static get type() {
		return 'PropertyNode';
	}
	/**
	 * Constructs a new property node.
	 *
	 * @param {String} nodeType - The type of the node.
	 * @param {String?} [name=null] - The name of the property in the shader.
	 * @param {Boolean} [varying=false] - Whether this property is a varying or not.
	 */
	constructor( nodeType, name = null, varying = false ) {
		super( nodeType );
		/**
		 * The property name in the shader; auto-generated by the
		 * node system when `null`.
		 *
		 * @type {String?}
		 * @default null
		 */
		this.name = name;
		/**
		 * Whether the property is emitted as a varying.
		 *
		 * @type {Boolean}
		 * @default false
		 */
		this.varying = varying;
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isPropertyNode = true;
	}
	getHash( builder ) {
		// named properties of the same name share a single shader variable
		return this.name || super.getHash( builder );
	}
	/**
	 * The method is overwritten so it always returns `true`.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Boolean} Whether this node is global or not.
	 */
	isGlobal( /*builder*/ ) {
		return true;
	}
	generate( builder ) {
		const isVarying = this.varying === true;
		const nodeVar = isVarying
			? builder.getVaryingFromNode( this, this.name )
			: builder.getVarFromNode( this, this.name );
		if ( isVarying ) nodeVar.needsInterpolation = true;
		return builder.getPropertyName( nodeVar );
	}
}
/**
 * TSL function for creating a property node. The property acts as a
 * writable shader variable.
 *
 * @function
 * @param {String} type - The type of the node.
 * @param {String?} [name=null] - The name of the property in the shader; auto-generated when omitted.
 * @returns {PropertyNode}
 */
const property = ( type, name ) => nodeObject( new PropertyNode( type, name ) );
/**
 * TSL function for creating a varying property node, i.e. a property that is
 * interpolated from the vertex to the fragment stage.
 *
 * @function
 * @param {String} type - The type of the node.
 * @param {String?} [name=null] - The name of the varying in the shader; auto-generated when omitted.
 * @returns {PropertyNode}
 */
const varyingProperty = ( type, name ) => nodeObject( new PropertyNode( type, name, true ) );
// Predefined property nodes for common material/shader variables.
/**
 * TSL object that represents the shader variable `DiffuseColor`.
 *
 * @type {PropertyNode<vec4>}
 */
const diffuseColor = /*@__PURE__*/ nodeImmutable( PropertyNode, 'vec4', 'DiffuseColor' );
/**
 * TSL object that represents the shader variable `EmissiveColor`.
 *
 * @type {PropertyNode<vec3>}
 */
const emissive = /*@__PURE__*/ nodeImmutable( PropertyNode, 'vec3', 'EmissiveColor' );
/**
 * TSL object that represents the shader variable `Roughness`.
 *
 * @type {PropertyNode<float>}
 */
const roughness = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Roughness' );
/**
 * TSL object that represents the shader variable `Metalness`.
 *
 * @type {PropertyNode<float>}
 */
const metalness = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Metalness' );
/**
 * TSL object that represents the shader variable `Clearcoat`.
 *
 * @type {PropertyNode<float>}
 */
const clearcoat = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Clearcoat' );
/**
 * TSL object that represents the shader variable `ClearcoatRoughness`.
 *
 * @type {PropertyNode<float>}
 */
const clearcoatRoughness = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'ClearcoatRoughness' );
/**
 * TSL object that represents the shader variable `Sheen`.
 *
 * @type {PropertyNode<vec3>}
 */
const sheen = /*@__PURE__*/ nodeImmutable( PropertyNode, 'vec3', 'Sheen' );
/**
 * TSL object that represents the shader variable `SheenRoughness`.
 *
 * @type {PropertyNode<float>}
 */
const sheenRoughness = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'SheenRoughness' );
/**
 * TSL object that represents the shader variable `Iridescence`.
 *
 * @type {PropertyNode<float>}
 */
const iridescence = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Iridescence' );
/**
 * TSL object that represents the shader variable `IridescenceIOR`.
 *
 * @type {PropertyNode<float>}
 */
const iridescenceIOR = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'IridescenceIOR' );
/**
 * TSL object that represents the shader variable `IridescenceThickness`.
 *
 * @type {PropertyNode<float>}
 */
const iridescenceThickness = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'IridescenceThickness' );
/**
 * TSL object that represents the shader variable `AlphaT`.
 *
 * @type {PropertyNode<float>}
 */
const alphaT = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'AlphaT' );
/**
 * TSL object that represents the shader variable `Anisotropy`.
 *
 * @type {PropertyNode<float>}
 */
const anisotropy = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Anisotropy' );
/**
 * TSL object that represents the shader variable `AnisotropyT`.
 *
 * @type {PropertyNode<vec3>}
 */
const anisotropyT = /*@__PURE__*/ nodeImmutable( PropertyNode, 'vec3', 'AnisotropyT' );
/**
 * TSL object that represents the shader variable `AnisotropyB`.
 *
 * @type {PropertyNode<vec3>}
 */
const anisotropyB = /*@__PURE__*/ nodeImmutable( PropertyNode, 'vec3', 'AnisotropyB' );
/**
 * TSL object that represents the shader variable `SpecularColor`.
 *
 * @type {PropertyNode<color>}
 */
const specularColor = /*@__PURE__*/ nodeImmutable( PropertyNode, 'color', 'SpecularColor' );
/**
 * TSL object that represents the shader variable `SpecularF90`.
 *
 * @type {PropertyNode<float>}
 */
const specularF90 = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'SpecularF90' );
/**
 * TSL object that represents the shader variable `Shininess`.
 *
 * @type {PropertyNode<float>}
 */
const shininess = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Shininess' );
/**
 * TSL object that represents the shader variable `Output`.
 *
 * @type {PropertyNode<vec4>}
 */
const output = /*@__PURE__*/ nodeImmutable( PropertyNode, 'vec4', 'Output' );
/**
 * TSL object that represents the shader variable `dashSize`.
 *
 * @type {PropertyNode<float>}
 */
const dashSize = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'dashSize' );
/**
 * TSL object that represents the shader variable `gapSize`.
 *
 * @type {PropertyNode<float>}
 */
const gapSize = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'gapSize' );
/**
 * TSL object that represents the shader variable `pointWidth`.
 *
 * @type {PropertyNode<float>}
 */
const pointWidth = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'pointWidth' );
/**
 * TSL object that represents the shader variable `IOR`.
 *
 * @type {PropertyNode<float>}
 */
const ior = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'IOR' );
/**
 * TSL object that represents the shader variable `Transmission`.
 *
 * @type {PropertyNode<float>}
 */
const transmission = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Transmission' );
/**
 * TSL object that represents the shader variable `Thickness`.
 *
 * @type {PropertyNode<float>}
 */
const thickness = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Thickness' );
/**
 * TSL object that represents the shader variable `AttenuationDistance`.
 *
 * @type {PropertyNode<float>}
 */
const attenuationDistance = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'AttenuationDistance' );
/**
 * TSL object that represents the shader variable `AttenuationColor`.
 *
 * @type {PropertyNode<color>}
 */
const attenuationColor = /*@__PURE__*/ nodeImmutable( PropertyNode, 'color', 'AttenuationColor' );
/**
 * TSL object that represents the shader variable `Dispersion`.
 *
 * @type {PropertyNode<float>}
 */
const dispersion = /*@__PURE__*/ nodeImmutable( PropertyNode, 'float', 'Dispersion' );
/** @module AssignNode **/
/**
 * This node represents an assign operation, meaning a node is assigned
 * to another node.
 *
 * @augments TempNode
 */
class AssignNode extends TempNode {
	static get type() {
		return 'AssignNode';
	}
	/**
	 * Constructs a new assign node.
	 *
	 * @param {Node} targetNode - The target node.
	 * @param {Node} sourceNode - The source node.
	 */
	constructor( targetNode, sourceNode ) {
		super();
		/**
		 * The target node.
		 *
		 * @type {Node}
		 */
		this.targetNode = targetNode;
		/**
		 * The source node.
		 *
		 * @type {Node}
		 */
		this.sourceNode = sourceNode;
	}
	/**
	 * Whether this node is used more than once in context of other nodes. This method
	 * is overwritten since it always returns `false` (assigns are unique).
	 *
	 * @return {Boolean} A flag that indicates if there is more than one dependency to other nodes. Always `false`.
	 */
	hasDependencies() {
		return false;
	}
	getNodeType( builder, output ) {
		// when used as a statement the assign itself has no value
		return output !== 'void' ? this.targetNode.getNodeType( builder ) : 'void';
	}
	/**
	 * Whether a split is required when assigning source to target. This can happen when the component length of
	 * target and source data type does not match.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Boolean} Whether a split is required when assigning source to target.
	 */
	needsSplitAssign( builder ) {
		const { targetNode } = this;
		if ( builder.isAvailable( 'swizzleAssign' ) === false && targetNode.isSplitNode && targetNode.components.length > 1 ) {
			const targetLength = builder.getTypeLength( targetNode.node.getNodeType( builder ) );
			const assignDifferentVector = vectorComponents.join( '' ).slice( 0, targetLength ) !== targetNode.components;
			return assignDifferentVector;
		}
		return false;
	}
	generate( builder, output ) {
		const { targetNode, sourceNode } = this;
		const needsSplitAssign = this.needsSplitAssign( builder );
		const targetType = targetNode.getNodeType( builder );
		// build the target within an `assign` context so consumers can detect
		// they are being used on the left-hand side
		const target = targetNode.context( { assign: true } ).build( builder );
		const source = sourceNode.build( builder, targetType );
		const sourceType = sourceNode.getNodeType( builder );
		const nodeData = builder.getDataFromNode( this );
		//
		let snippet;
		if ( nodeData.initialized === true ) {
			// the assignment was already emitted; only the target may be referenced again
			if ( output !== 'void' ) {
				snippet = target;
			}
		} else if ( needsSplitAssign ) {
			// emit a temporary, then assign each swizzled component individually
			const sourceVar = builder.getVarFromNode( this, null, targetType );
			const sourceProperty = builder.getPropertyName( sourceVar );
			builder.addLineFlowCode( `${ sourceProperty } = ${ source }`, this );
			const targetRoot = targetNode.node.context( { assign: true } ).build( builder );
			for ( let i = 0; i < targetNode.components.length; i ++ ) {
				const component = targetNode.components[ i ];
				builder.addLineFlowCode( `${ targetRoot }.${ component } = ${ sourceProperty }[ ${ i } ]`, this );
			}
			if ( output !== 'void' ) {
				snippet = target;
			}
		} else {
			snippet = `${ target } = ${ source }`;
			// as a statement ( or with a void source ) the assignment is emitted
			// as a flow line and only the target remains as expression value
			if ( output === 'void' || sourceType === 'void' ) {
				builder.addLineFlowCode( snippet, this );
				if ( output !== 'void' ) {
					snippet = target;
				}
			}
		}
		nodeData.initialized = true;
		return builder.format( snippet, targetType, output );
	}
}
/**
 * TSL function for creating an assign node.
 *
 * @function
 * @param {Node} targetNode - The target node.
 * @param {Node} sourceNode - The source node.
 * @returns {AssignNode}
 */
const assign = /*@__PURE__*/ nodeProxy( AssignNode );

addMethodChaining( 'assign', assign );
/**
 * This module represents the call of a {@link FunctionNode}. Developers are usually not confronted
 * with this module since they use the predefined TSL syntax `wgslFn` and `glslFn` which encapsulate
 * this logic.
 *
 * @augments TempNode
 */
class FunctionCallNode extends TempNode {

	static get type() {

		return 'FunctionCallNode';

	}

	/**
	 * Constructs a new function call node.
	 *
	 * @param {FunctionNode?} functionNode - The function node.
	 * @param {Object<String, Node>|Array<Node>} [parameters={}] - The parameters for the function call, either named or positional.
	 */
	constructor( functionNode = null, parameters = {} ) {

		super();

		/**
		 * The function node.
		 *
		 * @type {FunctionNode}
		 * @default null
		 */
		this.functionNode = functionNode;

		/**
		 * The parameters of the function call.
		 *
		 * @type {Object<String, Node>}
		 * @default {}
		 */
		this.parameters = parameters;

	}

	/**
	 * Sets the parameters of the function call node.
	 *
	 * @param {Object<String, Node>} parameters - The parameters to set.
	 * @return {FunctionCallNode} A reference to this node.
	 */
	setParameters( parameters ) {

		this.parameters = parameters;

		return this;

	}

	/**
	 * Returns the parameters of the function call node.
	 *
	 * @return {Object<String, Node>} The parameters of this node.
	 */
	getParameters() {

		return this.parameters;

	}

	/**
	 * The node type of a call is the return type of the called function.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		return this.functionNode.getNodeType( builder );

	}

	/**
	 * Generates the shader snippet for the function call.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The generated call snippet.
	 */
	generate( builder ) {

		const functionNode = this.functionNode;

		const inputs = functionNode.getInputs( builder );
		const parameters = this.parameters;

		// Builds a single argument snippet, honoring pointer inputs.
		const buildArgument = ( node, inputNode ) => {

			if ( inputNode.type === 'pointer' ) {

				return '&' + node.build( builder );

			}

			return node.build( builder, inputNode.type );

		};

		const params = [];

		if ( Array.isArray( parameters ) ) {

			// Positional parameters: match them to the declared inputs by index.

			let index = 0;

			for ( const parameter of parameters ) {

				params.push( buildArgument( parameter, inputs[ index ++ ] ) );

			}

		} else {

			// Named parameters: resolve each declared input by name.

			for ( const inputNode of inputs ) {

				const node = parameters[ inputNode.name ];

				if ( node === undefined ) {

					throw new Error( `FunctionCallNode: Input '${inputNode.name}' not found in FunctionNode.` );

				}

				params.push( buildArgument( node, inputNode ) );

			}

		}

		const functionName = functionNode.build( builder, 'property' );

		return `${functionName}( ${params.join( ', ' )} )`;

	}

}
/**
 * TSL function for creating a function call node.
 *
 * @function
 * @param {FunctionNode} func - The function to call.
 * @param {...Node} params - The parameters, either positional or a single dictionary of named parameters.
 * @returns {FunctionCallNode}
 */
const call = ( func, ...params ) => {

	// More than one argument (or a single node argument) means positional
	// parameters; a single plain object is treated as named parameters.
	params = params.length > 1 || ( params[ 0 ] && params[ 0 ].isNode === true ) ? nodeArray( params ) : nodeObjects( params[ 0 ] );

	return nodeObject( new FunctionCallNode( nodeObject( func ), params ) );

};

addMethodChaining( 'call', call );
/** @module OperatorNode **/

/**
 * This node represents basic mathematical and logical operations like addition,
 * subtraction or comparisons (e.g. `equal()`).
 *
 * @augments TempNode
 */
class OperatorNode extends TempNode {

	static get type() {

		return 'OperatorNode';

	}

	/**
	 * Constructs a new operator node.
	 *
	 * @param {String} op - The operator.
	 * @param {Node} aNode - The first input.
	 * @param {Node} bNode - The second input.
	 * @param {...Node} params - Additional input parameters.
	 */
	constructor( op, aNode, bNode, ...params ) {

		super();

		// More than two operands: fold them into a left-associative chain of
		// binary operator nodes so this node always holds exactly two inputs.
		if ( params.length > 0 ) {

			let finalOp = new OperatorNode( op, aNode, bNode );

			for ( let i = 0; i < params.length - 1; i ++ ) {

				finalOp = new OperatorNode( op, finalOp, params[ i ] );

			}

			aNode = finalOp;
			bNode = params[ params.length - 1 ];

		}

		/**
		 * The operator.
		 *
		 * @type {String}
		 */
		this.op = op;

		/**
		 * The first input.
		 *
		 * @type {Node}
		 */
		this.aNode = aNode;

		/**
		 * The second input.
		 *
		 * @type {Node}
		 */
		this.bNode = bNode;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isOperatorNode = true;

	}

	/**
	 * This method is overwritten since the node type is inferred from the operator
	 * and the input node types.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {String} output - The current output string.
	 * @return {String} The node type.
	 */
	getNodeType( builder, output ) {

		const op = this.op;

		const aNode = this.aNode;
		const bNode = this.bNode;

		const typeA = aNode.getNodeType( builder );
		const typeB = typeof bNode !== 'undefined' ? bNode.getNodeType( builder ) : null;

		if ( typeA === 'void' || typeB === 'void' ) {

			return 'void';

		} else if ( op === '%' ) {

			// Remainder keeps the type of the left-hand operand.
			return typeA;

		} else if ( op === '~' || op === '&' || op === '|' || op === '^' || op === '>>' || op === '<<' ) {

			// Bitwise operations require an integer type.
			return builder.getIntegerType( typeA );

		} else if ( op === '!' || op === '==' || op === '&&' || op === '||' || op === '^^' ) {

			return 'bool';

		} else if ( op === '<' || op === '>' || op === '<=' || op === '>=' ) {

			// Component-wise comparisons yield a boolean vector sized like the widest operand.
			const typeLength = output ? builder.getTypeLength( output ) : Math.max( builder.getTypeLength( typeA ), builder.getTypeLength( typeB ) );

			return typeLength > 1 ? `bvec${ typeLength }` : 'bool';

		} else {

			// Handle matrix operations

			if ( builder.isMatrix( typeA ) ) {

				if ( typeB === 'float' ) {

					return typeA; // matrix * scalar = matrix

				} else if ( builder.isVector( typeB ) ) {

					return builder.getVectorFromMatrix( typeA ); // matrix * vector

				} else if ( builder.isMatrix( typeB ) ) {

					return typeA; // matrix * matrix

				}

			} else if ( builder.isMatrix( typeB ) ) {

				if ( typeA === 'float' ) {

					return typeB; // scalar * matrix = matrix

				} else if ( builder.isVector( typeA ) ) {

					return builder.getVectorFromMatrix( typeB ); // vector * matrix

				}

			}

			// Handle non-matrix cases

			if ( builder.getTypeLength( typeB ) > builder.getTypeLength( typeA ) ) {

				// anytype x anytype: use the greater length vector

				return typeB;

			}

			return typeA;

		}

	}

	/**
	 * Generates the shader snippet for the operator, first coercing both
	 * operands to compatible build types.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {String} output - The output type requested by the caller.
	 * @return {String} The generated shader snippet.
	 */
	generate( builder, output ) {

		const op = this.op;

		const aNode = this.aNode;
		const bNode = this.bNode;

		const type = this.getNodeType( builder, output );

		let typeA = null;
		let typeB = null;

		if ( type !== 'void' ) {

			typeA = aNode.getNodeType( builder );
			typeB = typeof bNode !== 'undefined' ? bNode.getNodeType( builder ) : null;

			if ( op === '<' || op === '>' || op === '<=' || op === '>=' || op === '==' ) {

				// Comparisons require both operands to be built with one shared type.
				if ( builder.isVector( typeA ) ) {

					typeB = typeA;

				} else if ( typeA !== typeB ) {

					typeA = typeB = 'float';

				}

			} else if ( op === '>>' || op === '<<' ) {

				// Shifts: the shift amount must be unsigned.
				typeA = type;
				typeB = builder.changeComponentType( typeB, 'uint' );

			} else if ( builder.isMatrix( typeA ) ) {

				if ( typeB === 'float' ) {

					// Keep matrix type for typeA, but ensure typeB stays float

					typeB = 'float';

				} else if ( builder.isVector( typeB ) ) {

					// matrix x vector

					typeB = builder.getVectorFromMatrix( typeA );

				// matrix x matrix: both operand types are kept as-is (intentional empty statement).
				} else if ( builder.isMatrix( typeB ) ) ; else {

					typeA = typeB = type;

				}

			} else if ( builder.isMatrix( typeB ) ) {

				if ( typeA === 'float' ) {

					// Keep matrix type for typeB, but ensure typeA stays float

					typeA = 'float';

				} else if ( builder.isVector( typeA ) ) {

					// vector x matrix

					typeA = builder.getVectorFromMatrix( typeB );

				} else {

					typeA = typeB = type;

				}

			} else {

				// anytype x anytype

				typeA = typeB = type;

			}

		} else {

			typeA = typeB = type;

		}

		const a = aNode.build( builder, typeA );
		const b = typeof bNode !== 'undefined' ? bNode.build( builder, typeB ) : null;

		const outputLength = builder.getTypeLength( output );
		const fnOpSnippet = builder.getFunctionOperator( op );

		if ( output !== 'void' ) {

			// Vector comparisons may require dedicated backend methods (e.g. GLSL `lessThan()`).
			if ( op === '<' && outputLength > 1 ) {

				if ( builder.useComparisonMethod ) {

					return builder.format( `${ builder.getMethod( 'lessThan', output ) }( ${ a }, ${ b } )`, type, output );

				} else {

					return builder.format( `( ${ a } < ${ b } )`, type, output );

				}

			} else if ( op === '<=' && outputLength > 1 ) {

				if ( builder.useComparisonMethod ) {

					return builder.format( `${ builder.getMethod( 'lessThanEqual', output ) }( ${ a }, ${ b } )`, type, output );

				} else {

					return builder.format( `( ${ a } <= ${ b } )`, type, output );

				}

			} else if ( op === '>' && outputLength > 1 ) {

				if ( builder.useComparisonMethod ) {

					return builder.format( `${ builder.getMethod( 'greaterThan', output ) }( ${ a }, ${ b } )`, type, output );

				} else {

					return builder.format( `( ${ a } > ${ b } )`, type, output );

				}

			} else if ( op === '>=' && outputLength > 1 ) {

				if ( builder.useComparisonMethod ) {

					return builder.format( `${ builder.getMethod( 'greaterThanEqual', output ) }( ${ a }, ${ b } )`, type, output );

				} else {

					return builder.format( `( ${ a } >= ${ b } )`, type, output );

				}

			} else if ( op === '!' || op === '~' ) {

				// Unary operators only use the first operand.
				return builder.format( `(${op}${a})`, typeA, output );

			} else if ( fnOpSnippet ) {

				// The backend expresses this operator as a function call.
				return builder.format( `${ fnOpSnippet }( ${ a }, ${ b } )`, type, output );

			} else {

				// Handle matrix operations

				if ( builder.isMatrix( typeA ) && typeB === 'float' ) {

					return builder.format( `( ${ b } ${ op } ${ a } )`, type, output );

				} else if ( typeA === 'float' && builder.isMatrix( typeB ) ) {

					return builder.format( `${ a } ${ op } ${ b }`, type, output );

				} else {

					return builder.format( `( ${ a } ${ op } ${ b } )`, type, output );

				}

			}

		} else if ( typeA !== 'void' ) {

			if ( fnOpSnippet ) {

				return builder.format( `${ fnOpSnippet }( ${ a }, ${ b } )`, type, output );

			} else {

				if ( builder.isMatrix( typeA ) && typeB === 'float' ) {

					return builder.format( `${ b } ${ op } ${ a }`, type, output );

				} else {

					return builder.format( `${ a } ${ op } ${ b }`, type, output );

				}

			}

		}

		// NOTE(review): when typeA is 'void' and output is 'void' nothing is
		// returned (undefined) — presumably intentional for statement-only
		// operands; confirm against NodeBuilder usage.

	}

	/**
	 * Serializes the operator to the given data object.
	 *
	 * @param {Object} data - The data object.
	 */
	serialize( data ) {

		super.serialize( data );

		data.op = this.op;

	}

	/**
	 * Deserializes the operator from the given data object.
	 *
	 * @param {Object} data - The data object.
	 */
	deserialize( data ) {

		super.deserialize( data );

		this.op = data.op;

	}

}
/**
 * Returns the addition of two or more values.
 *
 * @function
 * @param {Node} aNode - The first input.
 * @param {Node} bNode - The second input.
 * @param {...Node} params - Additional input parameters.
 * @returns {OperatorNode}
 */
const add = /*@__PURE__*/ nodeProxy( OperatorNode, '+' );
/**
 * Returns the subtraction of two or more values.
 *
 * @function
 * @param {Node} aNode - The first input.
 * @param {Node} bNode - The second input.
 * @param {...Node} params - Additional input parameters.
 * @returns {OperatorNode}
 */
const sub = /*@__PURE__*/ nodeProxy( OperatorNode, '-' );
/**
 * Returns the multiplication of two or more values.
 *
 * @function
 * @param {Node} aNode - The first input.
 * @param {Node} bNode - The second input.
 * @param {...Node} params - Additional input parameters.
 * @returns {OperatorNode}
 */
const mul = /*@__PURE__*/ nodeProxy( OperatorNode, '*' );
/**
 * Returns the division of two or more values.
 *
 * @function
 * @param {Node} aNode - The first input.
 * @param {Node} bNode - The second input.
 * @param {...Node} params - Additional input parameters.
 * @returns {OperatorNode}
 */
const div = /*@__PURE__*/ nodeProxy( OperatorNode, '/' );
/**
* Computes the remainder of dividing the first node by the second, for integer values.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const modInt = /*@__PURE__*/ nodeProxy( OperatorNode, '%' );
/**
* Checks if two nodes are equal.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const equal = /*@__PURE__*/ nodeProxy( OperatorNode, '==' );
/**
* Checks if two nodes are not equal.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const notEqual = /*@__PURE__*/ nodeProxy( OperatorNode, '!=' );
/**
* Checks if the first node is less than the second.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const lessThan = /*@__PURE__*/ nodeProxy( OperatorNode, '<' );
/**
* Checks if the first node is greater than the second.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const greaterThan = /*@__PURE__*/ nodeProxy( OperatorNode, '>' );
/**
* Checks if the first node is less than or equal to the second.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const lessThanEqual = /*@__PURE__*/ nodeProxy( OperatorNode, '<=' );
/**
* Checks if the first node is greater than or equal to the second.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const greaterThanEqual = /*@__PURE__*/ nodeProxy( OperatorNode, '>=' );
/**
* Performs logical AND on two nodes.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const and = /*@__PURE__*/ nodeProxy( OperatorNode, '&&' );
/**
* Performs logical OR on two nodes.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const or = /*@__PURE__*/ nodeProxy( OperatorNode, '||' );
/**
 * Performs logical NOT on a node (unary).
 *
 * @function
 * @param {Node} aNode - The input to negate.
 * @returns {OperatorNode}
 */
const not = /*@__PURE__*/ nodeProxy( OperatorNode, '!' );
/**
* Performs logical XOR on two nodes.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const xor = /*@__PURE__*/ nodeProxy( OperatorNode, '^^' );
/**
* Performs bitwise AND on two nodes.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const bitAnd = /*@__PURE__*/ nodeProxy( OperatorNode, '&' );
/**
 * Performs bitwise NOT on a node (unary).
 *
 * @function
 * @param {Node} aNode - The input.
 * @returns {OperatorNode}
 */
const bitNot = /*@__PURE__*/ nodeProxy( OperatorNode, '~' );
/**
* Performs bitwise OR on two nodes.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const bitOr = /*@__PURE__*/ nodeProxy( OperatorNode, '|' );
/**
* Performs bitwise XOR on two nodes.
*
* @function
* @param {Node} aNode - The first input.
* @param {Node} bNode - The second input.
* @returns {OperatorNode}
*/
const bitXor = /*@__PURE__*/ nodeProxy( OperatorNode, '^' );
/**
* Shifts a node to the left.
*
* @function
* @param {Node} aNode - The node to shift.
* @param {Node} bNode - The value to shift.
* @returns {OperatorNode}
*/
const shiftLeft = /*@__PURE__*/ nodeProxy( OperatorNode, '<<' );
/**
* Shifts a node to the right.
*
* @function
* @param {Node} aNode - The node to shift.
* @param {Node} bNode - The value to shift.
* @returns {OperatorNode}
*/
const shiftRight = /*@__PURE__*/ nodeProxy( OperatorNode, '>>' );
addMethodChaining( 'add', add );
addMethodChaining( 'sub', sub );
addMethodChaining( 'mul', mul );
addMethodChaining( 'div', div );
addMethodChaining( 'modInt', modInt );
addMethodChaining( 'equal', equal );
addMethodChaining( 'notEqual', notEqual );
addMethodChaining( 'lessThan', lessThan );
addMethodChaining( 'greaterThan', greaterThan );
addMethodChaining( 'lessThanEqual', lessThanEqual );
addMethodChaining( 'greaterThanEqual', greaterThanEqual );
addMethodChaining( 'and', and );
addMethodChaining( 'or', or );
addMethodChaining( 'not', not );
addMethodChaining( 'xor', xor );
addMethodChaining( 'bitAnd', bitAnd );
addMethodChaining( 'bitNot', bitNot );
addMethodChaining( 'bitOr', bitOr );
addMethodChaining( 'bitXor', bitXor );
addMethodChaining( 'shiftLeft', shiftLeft );
addMethodChaining( 'shiftRight', shiftRight );
/**
* @function
* @deprecated since r168. Use {@link modInt} instead.
*
* @param {...any} params
* @returns {Function}
*/
const remainder = ( ...params ) => { // @deprecated, r168
console.warn( 'TSL.OperatorNode: .remainder() has been renamed to .modInt().' );
return modInt( ...params );
};
addMethodChaining( 'remainder', remainder );
/** @module MathNode **/

/**
 * This node represents a variety of mathematical methods available in shaders.
 * They are divided into three categories:
 *
 * - Methods with one input like `sin`, `cos` or `normalize`.
 * - Methods with two inputs like `dot`, `cross` or `pow`.
 * - Methods with three inputs like `mix`, `clamp` or `smoothstep`.
 *
 * @augments TempNode
 */
class MathNode extends TempNode {

	static get type() {

		return 'MathNode';

	}

	/**
	 * Constructs a new math node.
	 *
	 * @param {String} method - The method name.
	 * @param {Node} aNode - The first input.
	 * @param {Node?} [bNode=null] - The second input.
	 * @param {Node?} [cNode=null] - The third input.
	 */
	constructor( method, aNode, bNode = null, cNode = null ) {

		super();

		/**
		 * The method name.
		 *
		 * @type {String}
		 */
		this.method = method;

		/**
		 * The first input.
		 *
		 * @type {Node}
		 */
		this.aNode = aNode;

		/**
		 * The second input.
		 *
		 * @type {Node?}
		 * @default null
		 */
		this.bNode = bNode;

		/**
		 * The third input.
		 *
		 * @type {Node?}
		 * @default null
		 */
		this.cNode = cNode;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isMathNode = true;

	}

	/**
	 * The input type is inferred from the node types of the input nodes.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The input type.
	 */
	getInputType( builder ) {

		const aType = this.aNode.getNodeType( builder );
		const bType = this.bNode ? this.bNode.getNodeType( builder ) : null;
		const cType = this.cNode ? this.cNode.getNodeType( builder ) : null;

		// Matrices are ranked with length 0 so vector/scalar inputs decide the input type.
		const aLen = builder.isMatrix( aType ) ? 0 : builder.getTypeLength( aType );
		const bLen = builder.isMatrix( bType ) ? 0 : builder.getTypeLength( bType );
		const cLen = builder.isMatrix( cType ) ? 0 : builder.getTypeLength( cType );

		// Pick the type with the greatest component count; ties fall back to the first input.
		if ( aLen > bLen && aLen > cLen ) {

			return aType;

		} else if ( bLen > cLen ) {

			return bType;

		} else if ( cLen > aLen ) {

			return cType;

		}

		return aType;

	}

	/**
	 * The selected method as well as the input type determine the node type of this node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		const method = this.method;

		if ( method === MathNode.LENGTH || method === MathNode.DISTANCE || method === MathNode.DOT ) {

			// These reduce their vector inputs to a scalar.
			return 'float';

		} else if ( method === MathNode.CROSS ) {

			return 'vec3';

		} else if ( method === MathNode.ALL ) {

			return 'bool';

		} else if ( method === MathNode.EQUALS ) {

			// Component-wise equality yields a boolean vector of the same size.
			return builder.changeComponentType( this.aNode.getNodeType( builder ), 'bool' );

		} else if ( method === MathNode.MOD ) {

			// Modulo keeps the type of the first input.
			return this.aNode.getNodeType( builder );

		} else {

			return this.getInputType( builder );

		}

	}

	/**
	 * Generates the shader snippet for the configured math method, applying
	 * backend-specific argument conversions where needed.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {String} output - The output type requested by the caller.
	 * @return {String} The generated shader snippet.
	 */
	generate( builder, output ) {

		let method = this.method;

		const type = this.getNodeType( builder );
		const inputType = this.getInputType( builder );

		const a = this.aNode;
		const b = this.bNode;
		const c = this.cNode;

		const coordinateSystem = builder.renderer.coordinateSystem;

		if ( method === MathNode.TRANSFORM_DIRECTION ) {

			// dir can be either a direction vector or a normal vector
			// upper-left 3x3 of matrix is assumed to be orthogonal

			let tA = a;
			let tB = b;

			// Accept both argument orders: ( matrix, vector ) and ( vector, matrix ).
			if ( builder.isMatrix( tA.getNodeType( builder ) ) ) {

				tB = vec4( vec3( tB ), 0.0 );

			} else {

				tA = vec4( vec3( tA ), 0.0 );

			}

			const mulNode = mul( tA, tB ).xyz;

			return normalize( mulNode ).build( builder, output );

		} else if ( method === MathNode.NEGATE ) {

			return builder.format( '( - ' + a.build( builder, inputType ) + ' )', type, output );

		} else if ( method === MathNode.ONE_MINUS ) {

			return sub( 1.0, a ).build( builder, output );

		} else if ( method === MathNode.RECIPROCAL ) {

			return div( 1.0, a ).build( builder, output );

		} else if ( method === MathNode.DIFFERENCE ) {

			return abs( sub( a, b ) ).build( builder, output );

		} else {

			const params = [];

			if ( method === MathNode.CROSS || method === MathNode.MOD ) {

				// Both arguments must be built with the node type itself.
				params.push(
					a.build( builder, type ),
					b.build( builder, type )
				);

			} else if ( coordinateSystem === WebGLCoordinateSystem && method === MathNode.STEP ) {

				// GLSL `step()` has an overload with a scalar edge argument.
				params.push(
					a.build( builder, builder.getTypeLength( a.getNodeType( builder ) ) === 1 ? 'float' : inputType ),
					b.build( builder, inputType )
				);

			// NOTE(review): the `method === MathNode.MOD` check below is
			// unreachable — MOD is already handled in the first branch above.
			} else if ( ( coordinateSystem === WebGLCoordinateSystem && ( method === MathNode.MIN || method === MathNode.MAX ) ) || method === MathNode.MOD ) {

				// GLSL `min()`/`max()` have overloads with a scalar second argument.
				params.push(
					a.build( builder, inputType ),
					b.build( builder, builder.getTypeLength( b.getNodeType( builder ) ) === 1 ? 'float' : inputType )
				);

			} else if ( method === MathNode.REFRACT ) {

				// The ratio of indices of refraction is always a scalar.
				params.push(
					a.build( builder, inputType ),
					b.build( builder, inputType ),
					c.build( builder, 'float' )
				);

			} else if ( method === MathNode.MIX ) {

				// The interpolation factor may be a scalar or match the input type.
				params.push(
					a.build( builder, inputType ),
					b.build( builder, inputType ),
					c.build( builder, builder.getTypeLength( c.getNodeType( builder ) ) === 1 ? 'float' : inputType )
				);

			} else {

				// WGSL has no two-argument `atan()` overload; use `atan2()` instead.
				if ( coordinateSystem === WebGPUCoordinateSystem && method === MathNode.ATAN && b !== null ) {

					method = 'atan2';

				}

				params.push( a.build( builder, inputType ) );
				if ( b !== null ) params.push( b.build( builder, inputType ) );
				if ( c !== null ) params.push( c.build( builder, inputType ) );

			}

			return builder.format( `${ builder.getMethod( method, type ) }( ${params.join( ', ' )} )`, type, output );

		}

	}

	/**
	 * Serializes the math method to the given data object.
	 *
	 * @param {Object} data - The data object.
	 */
	serialize( data ) {

		super.serialize( data );

		data.method = this.method;

	}

	/**
	 * Deserializes the math method from the given data object.
	 *
	 * @param {Object} data - The data object.
	 */
	deserialize( data ) {

		super.deserialize( data );

		this.method = data.method;

	}

}
// Method name constants used by MathNode, grouped by arity.

// 1 input

MathNode.ALL = 'all';
MathNode.ANY = 'any';
MathNode.RADIANS = 'radians';
MathNode.DEGREES = 'degrees';
MathNode.EXP = 'exp';
MathNode.EXP2 = 'exp2';
MathNode.LOG = 'log';
MathNode.LOG2 = 'log2';
MathNode.SQRT = 'sqrt';
MathNode.INVERSE_SQRT = 'inversesqrt';
MathNode.FLOOR = 'floor';
MathNode.CEIL = 'ceil';
MathNode.NORMALIZE = 'normalize';
MathNode.FRACT = 'fract';
MathNode.SIN = 'sin';
MathNode.COS = 'cos';
MathNode.TAN = 'tan';
MathNode.ASIN = 'asin';
MathNode.ACOS = 'acos';
MathNode.ATAN = 'atan';
MathNode.ABS = 'abs';
MathNode.SIGN = 'sign';
MathNode.LENGTH = 'length';
MathNode.NEGATE = 'negate';
MathNode.ONE_MINUS = 'oneMinus';
MathNode.DFDX = 'dFdx';
MathNode.DFDY = 'dFdy';
MathNode.ROUND = 'round';
MathNode.RECIPROCAL = 'reciprocal';
MathNode.TRUNC = 'trunc';
MathNode.FWIDTH = 'fwidth';
MathNode.TRANSPOSE = 'transpose';

// 2 inputs

MathNode.BITCAST = 'bitcast';
MathNode.EQUALS = 'equals';
MathNode.MIN = 'min';
MathNode.MAX = 'max';
MathNode.MOD = 'mod';
MathNode.STEP = 'step';
MathNode.REFLECT = 'reflect';
MathNode.DISTANCE = 'distance';
MathNode.DIFFERENCE = 'difference';
MathNode.DOT = 'dot';
MathNode.CROSS = 'cross';
MathNode.POW = 'pow';
MathNode.TRANSFORM_DIRECTION = 'transformDirection';

// 3 inputs

MathNode.MIX = 'mix';
MathNode.CLAMP = 'clamp';
MathNode.REFRACT = 'refract';
MathNode.SMOOTHSTEP = 'smoothstep';
MathNode.FACEFORWARD = 'faceforward';

// constants
/**
* A small value used to handle floating-point precision errors.
*
* @type {Node<float>}
*/
const EPSILON = /*@__PURE__*/ float( 1e-6 );
/**
* Represents infinity.
*
* @type {Node<float>}
*/
const INFINITY = /*@__PURE__*/ float( 1e6 );
/**
* Represents PI.
*
* @type {Node<float>}
*/
const PI = /*@__PURE__*/ float( Math.PI );
/**
* Represents PI * 2.
*
* @type {Node<float>}
*/
const PI2 = /*@__PURE__*/ float( Math.PI * 2 );
/**
* Returns `true` if all components of `x` are `true`.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node<bool>}
*/
const all = /*@__PURE__*/ nodeProxy( MathNode, MathNode.ALL );
/**
* Returns `true` if any components of `x` are `true`.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node<bool>}
*/
const any = /*@__PURE__*/ nodeProxy( MathNode, MathNode.ANY );
/**
* Converts a quantity in degrees to radians.
*
* @function
* @param {Node | Number} x - The input in degrees.
* @returns {Node}
*/
const radians = /*@__PURE__*/ nodeProxy( MathNode, MathNode.RADIANS );
/**
* Convert a quantity in radians to degrees.
*
* @function
* @param {Node | Number} x - The input in radians.
* @returns {Node}
*/
const degrees = /*@__PURE__*/ nodeProxy( MathNode, MathNode.DEGREES );
/**
* Returns the natural exponentiation of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const exp = /*@__PURE__*/ nodeProxy( MathNode, MathNode.EXP );
/**
* Returns 2 raised to the power of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const exp2 = /*@__PURE__*/ nodeProxy( MathNode, MathNode.EXP2 );
/**
* Returns the natural logarithm of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const log = /*@__PURE__*/ nodeProxy( MathNode, MathNode.LOG );
/**
* Returns the base 2 logarithm of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const log2 = /*@__PURE__*/ nodeProxy( MathNode, MathNode.LOG2 );
/**
* Returns the square root of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const sqrt = /*@__PURE__*/ nodeProxy( MathNode, MathNode.SQRT );
/**
* Returns the inverse of the square root of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const inverseSqrt = /*@__PURE__*/ nodeProxy( MathNode, MathNode.INVERSE_SQRT );
/**
* Finds the nearest integer less than or equal to the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const floor = /*@__PURE__*/ nodeProxy( MathNode, MathNode.FLOOR );
/**
* Finds the nearest integer that is greater than or equal to the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const ceil = /*@__PURE__*/ nodeProxy( MathNode, MathNode.CEIL );
/**
* Calculates the unit vector in the same direction as the original vector.
*
* @function
* @param {Node} x - The input vector.
* @returns {Node}
*/
const normalize = /*@__PURE__*/ nodeProxy( MathNode, MathNode.NORMALIZE );
/**
* Computes the fractional part of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const fract = /*@__PURE__*/ nodeProxy( MathNode, MathNode.FRACT );
/**
* Returns the sine of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const sin = /*@__PURE__*/ nodeProxy( MathNode, MathNode.SIN );
/**
* Returns the cosine of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const cos = /*@__PURE__*/ nodeProxy( MathNode, MathNode.COS );
/**
* Returns the tangent of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const tan = /*@__PURE__*/ nodeProxy( MathNode, MathNode.TAN );
/**
* Returns the arcsine of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const asin = /*@__PURE__*/ nodeProxy( MathNode, MathNode.ASIN );
/**
* Returns the arccosine of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const acos = /*@__PURE__*/ nodeProxy( MathNode, MathNode.ACOS );
/**
 * Returns the arc-tangent of the parameter.
 * If two parameters are provided, the result is `atan2( y, x )`.
 *
 * @function
 * @param {Node | Number} y - The y parameter.
 * @param {(Node | Number)?} x - The x parameter.
 * @returns {Node}
 */
const atan = /*@__PURE__*/ nodeProxy( MathNode, MathNode.ATAN );
/**
* Returns the absolute value of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const abs = /*@__PURE__*/ nodeProxy( MathNode, MathNode.ABS );
/**
* Extracts the sign of the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const sign = /*@__PURE__*/ nodeProxy( MathNode, MathNode.SIGN );
/**
* Calculates the length of a vector.
*
* @function
* @param {Node} x - The parameter.
* @returns {Node<float>}
*/
const length = /*@__PURE__*/ nodeProxy( MathNode, MathNode.LENGTH );
/**
* Negates the value of the parameter (-x).
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const negate = /*@__PURE__*/ nodeProxy( MathNode, MathNode.NEGATE );
/**
* Return `1` minus the parameter.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const oneMinus = /*@__PURE__*/ nodeProxy( MathNode, MathNode.ONE_MINUS );
/**
* Returns the partial derivative of the parameter with respect to x.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const dFdx = /*@__PURE__*/ nodeProxy( MathNode, MathNode.DFDX );
/**
* Returns the partial derivative of the parameter with respect to y.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const dFdy = /*@__PURE__*/ nodeProxy( MathNode, MathNode.DFDY );
/**
* Rounds the parameter to the nearest integer.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const round = /*@__PURE__*/ nodeProxy( MathNode, MathNode.ROUND );
/**
* Returns the reciprocal of the parameter `(1/x)`.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const reciprocal = /*@__PURE__*/ nodeProxy( MathNode, MathNode.RECIPROCAL );
/**
* Truncates the parameter, removing the fractional part.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const trunc = /*@__PURE__*/ nodeProxy( MathNode, MathNode.TRUNC );
/**
* Returns the sum of the absolute derivatives in x and y.
*
* @function
* @param {Node | Number} x - The parameter.
* @returns {Node}
*/
const fwidth = /*@__PURE__*/ nodeProxy( MathNode, MathNode.FWIDTH );
/**
* Returns the transpose of a matrix.
*
* @function
* @param {Node<mat2|mat3|mat4>} x - The parameter.
* @returns {Node}
*/
const transpose = /*@__PURE__*/ nodeProxy( MathNode, MathNode.TRANSPOSE );
// 2 inputs
/**
* Reinterpret the bit representation of a value in one type as a value in another type.
*
* @function
* @param {Node | Number} x - The parameter.
* @param {String} y - The new type.
* @returns {Node}
*/
const bitcast = /*@__PURE__*/ nodeProxy( MathNode, MathNode.BITCAST );
/**
* Returns `true` if `x` equals `y`.
*
* @function
* @param {Node | Number} x - The first parameter.
* @param {Node | Number} y - The second parameter.
* @returns {Node<bool>}
*/
const equals = /*@__PURE__*/ nodeProxy( MathNode, MathNode.EQUALS );
/**
 * Returns the lesser of two values.
 *
 * @function
 * @param {Node | Number} x - The first parameter.
 * @param {Node | Number} y - The second parameter.
 * @returns {Node}
 */
const min$1 = /*@__PURE__*/ nodeProxy( MathNode, MathNode.MIN );
/**
 * Returns the greater of two values.
 *
 * @function
 * @param {Node | Number} x - The first parameter.
 * @param {Node | Number} y - The second parameter.
 * @returns {Node}
 */
const max$1 = /*@__PURE__*/ nodeProxy( MathNode, MathNode.MAX );
/**
 * Computes the remainder of dividing the first node by the second one.
 *
 * @function
 * @param {Node | Number} x - The dividend.
 * @param {Node | Number} y - The divisor.
 * @returns {Node}
 */
const mod = /*@__PURE__*/ nodeProxy( MathNode, MathNode.MOD );
/**
 * Generate a step function by comparing two values.
 *
 * @function
 * @param {Node | Number} x - The edge value.
 * @param {Node | Number} y - The value to compare against the edge.
 * @returns {Node}
 */
const step = /*@__PURE__*/ nodeProxy( MathNode, MathNode.STEP );
/**
* Calculates the reflection direction for an incident vector.
*
* @function
* @param {Node<vec2|vec3|vec4>} I - The incident vector.
* @param {Node<vec2|vec3|vec4>} N - The normal vector.
* @returns {Node<vec2|vec3|vec4>}
*/
const reflect = /*@__PURE__*/ nodeProxy( MathNode, MathNode.REFLECT );
/**
* Calculates the distance between two points.
*
* @function
* @param {Node<vec2|vec3|vec4>} x - The first point.
* @param {Node<vec2|vec3|vec4>} y - The second point.
* @returns {Node<float>}
*/
const distance = /*@__PURE__*/ nodeProxy( MathNode, MathNode.DISTANCE );
/**
* Calculates the absolute difference between two values.
*
* @function
* @param {Node | Number} x - The first parameter.
* @param {Node | Number} y - The second parameter.
* @returns {Node}
*/
const difference = /*@__PURE__*/ nodeProxy( MathNode, MathNode.DIFFERENCE );
/**
* Calculates the dot product of two vectors.
*
* @function
* @param {Node<vec2|vec3|vec4>} x - The first vector.
* @param {Node<vec2|vec3|vec4>} y - The second vector.
* @returns {Node<float>}
*/
const dot = /*@__PURE__*/ nodeProxy( MathNode, MathNode.DOT );
/**
* Calculates the cross product of two vectors.
*
* @function
* @param {Node<vec2|vec3|vec4>} x - The first vector.
* @param {Node<vec2|vec3|vec4>} y - The second vector.
* @returns {Node<vec2|vec3|vec4>}
*/
const cross = /*@__PURE__*/ nodeProxy( MathNode, MathNode.CROSS );
/**
* Return the value of the first parameter raised to the power of the second one.
*
* @function
* @param {Node | Number} x - The base.
* @param {Node | Number} y - The exponent.
* @returns {Node}
*/
const pow = /*@__PURE__*/ nodeProxy( MathNode, MathNode.POW );
/**
* Returns the square of the parameter.
*
* @function
* @param {Node | Number} x - The value to square.
* @returns {Node}
*/
const pow2 = /*@__PURE__*/ nodeProxy( MathNode, MathNode.POW, 2 );
/**
* Returns the cube of the parameter.
*
* @function
* @param {Node | Number} x - The value to cube.
* @returns {Node}
*/
const pow3 = /*@__PURE__*/ nodeProxy( MathNode, MathNode.POW, 3 );
/**
* Returns the fourth power of the parameter.
*
* @function
* @param {Node | Number} x - The value to raise to the fourth power.
* @returns {Node}
*/
const pow4 = /*@__PURE__*/ nodeProxy( MathNode, MathNode.POW, 4 );
/**
* Transforms the direction of a vector by a matrix and then normalizes the result.
*
* @function
* @param {Node<vec2|vec3|vec4>} direction - The direction vector.
* @param {Node<mat2|mat3|mat4>} matrix - The transformation matrix.
* @returns {Node}
*/
const transformDirection = /*@__PURE__*/ nodeProxy( MathNode, MathNode.TRANSFORM_DIRECTION );
/**
* Returns the cube root of a number.
*
* @function
* @param {Node | Number} a - The value to compute the cube root of.
* @returns {Node}
*/
// sign( a ) * |a|^(1/3) keeps the result well-defined for negative inputs,
// since pow() of a negative base is undefined in GLSL/WGSL.
const cbrt = ( a ) => mul( sign( a ), pow( abs( a ), 1.0 / 3.0 ) );
/**
* Calculate the squared length of a vector.
*
* @function
* @param {Node<vec2|vec3|vec4>} a - The vector.
* @returns {Node<float>}
*/
const lengthSq = ( a ) => dot( a, a );
/**
* Linearly interpolates between two values.
*
* @function
* @param {Node | Number} a - The first parameter.
* @param {Node | Number} b - The second parameter.
* @param {Node | Number} t - The interpolation value.
* @returns {Node}
*/
const mix = /*@__PURE__*/ nodeProxy( MathNode, MathNode.MIX );
/**
* Constrains a value to lie between two further values.
*
* @function
* @param {Node | Number} value - The value to constrain.
* @param {Node | Number} [low=0] - The lower bound.
* @param {Node | Number} [high=1] - The upper bound.
* @returns {Node}
*/
const clamp = ( value, low = 0, high = 1 ) => nodeObject( new MathNode( MathNode.CLAMP, nodeObject( value ), nodeObject( low ), nodeObject( high ) ) );
/**
* Constrains a value between `0` and `1`.
*
* @function
* @param {Node | Number} value - The value to constrain.
* @returns {Node}
*/
// relies on clamp()'s default bounds of [ 0, 1 ]
const saturate = ( value ) => clamp( value );
/**
* Calculates the refraction direction for an incident vector.
*
* @function
* @param {Node<vec2|vec3|vec4>} I - The incident vector.
* @param {Node<vec2|vec3|vec4>} N - The normal vector.
* @param {Node<float>} eta - The ratio of indices of refraction.
* @returns {Node<vec2|vec3|vec4>}
*/
const refract = /*@__PURE__*/ nodeProxy( MathNode, MathNode.REFRACT );
/**
* Performs a Hermite interpolation between two values.
*
* @function
* @param {Node | Number} low - The value of the lower edge of the Hermite function.
* @param {Node | Number} high - The value of the upper edge of the Hermite function.
* @param {Node | Number} x - The source value for interpolation.
* @returns {Node}
*/
const smoothstep = /*@__PURE__*/ nodeProxy( MathNode, MathNode.SMOOTHSTEP );
/**
* Returns a vector pointing in the same direction as another.
*
* @function
* @param {Node<vec2|vec3|vec4>} N - The vector to orient.
* @param {Node<vec2|vec3|vec4>} I - The incident vector.
* @param {Node<vec2|vec3|vec4>} Nref - The reference vector.
* @returns {Node<vec2|vec3|vec4>}
*/
const faceForward = /*@__PURE__*/ nodeProxy( MathNode, MathNode.FACEFORWARD );
/**
* Returns a random value for the given uv.
*
* @function
* @param {Node<vec2>} uv - The uv node.
* @returns {Node<float>}
*/
// classic fract( sin( dot( uv, k ) ) * c ) hash — deterministic per uv, not a true RNG
const rand = /*@__PURE__*/ Fn( ( [ uv ] ) => {
const a = 12.9898, b = 78.233, c = 43758.5453;
const dt = dot( uv.xy, vec2( a, b ) ), sn = mod( dt, PI );
return fract( sin( sn ).mul( c ) );
} );
/**
* Alias for `mix()` with a different parameter order, suitable for method chaining
* where the interpolation value is the chained node.
*
* @function
* @param {Node | Number} t - The interpolation value.
* @param {Node | Number} e1 - The first parameter.
* @param {Node | Number} e2 - The second parameter.
* @returns {Node}
*/
const mixElement = ( t, e1, e2 ) => mix( e1, e2, t );
/**
* Alias for `smoothstep()` with a different parameter order, suitable for method
* chaining where the source value is the chained node.
*
* @function
* @param {Node | Number} x - The source value for interpolation.
* @param {Node | Number} low - The value of the lower edge of the Hermite function.
* @param {Node | Number} high - The value of the upper edge of the Hermite function.
* @returns {Node}
*/
const smoothstepElement = ( x, low, high ) => smoothstep( low, high, x );
/**
* Returns the arc-tangent of the quotient of its parameters.
*
* @function
* @deprecated since r172. Use {@link atan} instead.
*
* @param {Node | Number} y - The y parameter.
* @param {Node | Number} x - The x parameter.
* @returns {Node}
*/
const atan2 = ( y, x ) => { // @deprecated, r172
console.warn( 'THREE.TSL: "atan2" is overloaded. Use "atan" instead.' );
return atan( y, x );
};
// GLSL alias function — lowercase names matching the GLSL built-in spelling
const faceforward = faceForward;
const inversesqrt = inverseSqrt;
// Method chaining
// Registers the math functions above as chainable node methods,
// e.g. `node.abs()`, `node.min( other )`, `node.clamp( 0, 1 )`.
addMethodChaining( 'all', all );
addMethodChaining( 'any', any );
addMethodChaining( 'equals', equals );
addMethodChaining( 'radians', radians );
addMethodChaining( 'degrees', degrees );
addMethodChaining( 'exp', exp );
addMethodChaining( 'exp2', exp2 );
addMethodChaining( 'log', log );
addMethodChaining( 'log2', log2 );
addMethodChaining( 'sqrt', sqrt );
addMethodChaining( 'inverseSqrt', inverseSqrt );
addMethodChaining( 'floor', floor );
addMethodChaining( 'ceil', ceil );
addMethodChaining( 'normalize', normalize );
addMethodChaining( 'fract', fract );
addMethodChaining( 'sin', sin );
addMethodChaining( 'cos', cos );
addMethodChaining( 'tan', tan );
addMethodChaining( 'asin', asin );
addMethodChaining( 'acos', acos );
addMethodChaining( 'atan', atan );
addMethodChaining( 'abs', abs );
addMethodChaining( 'sign', sign );
addMethodChaining( 'length', length );
addMethodChaining( 'lengthSq', lengthSq );
addMethodChaining( 'negate', negate );
addMethodChaining( 'oneMinus', oneMinus );
addMethodChaining( 'dFdx', dFdx );
addMethodChaining( 'dFdy', dFdy );
addMethodChaining( 'round', round );
addMethodChaining( 'reciprocal', reciprocal );
addMethodChaining( 'trunc', trunc );
addMethodChaining( 'fwidth', fwidth );
addMethodChaining( 'atan2', atan2 );
addMethodChaining( 'min', min$1 );
addMethodChaining( 'max', max$1 );
addMethodChaining( 'mod', mod );
addMethodChaining( 'step', step );
addMethodChaining( 'reflect', reflect );
addMethodChaining( 'distance', distance );
addMethodChaining( 'dot', dot );
addMethodChaining( 'cross', cross );
addMethodChaining( 'pow', pow );
addMethodChaining( 'pow2', pow2 );
addMethodChaining( 'pow3', pow3 );
addMethodChaining( 'pow4', pow4 );
addMethodChaining( 'transformDirection', transformDirection );
// note: 'mix' and 'smoothstep' use the *Element variants so the chained
// node takes the interpolation/source-value position
addMethodChaining( 'mix', mixElement );
addMethodChaining( 'clamp', clamp );
addMethodChaining( 'refract', refract );
addMethodChaining( 'smoothstep', smoothstepElement );
addMethodChaining( 'faceForward', faceForward );
addMethodChaining( 'difference', difference );
addMethodChaining( 'saturate', saturate );
addMethodChaining( 'cbrt', cbrt );
addMethodChaining( 'transpose', transpose );
addMethodChaining( 'rand', rand );
/** @module ConditionalNode **/
/**
* Represents a logical `if/else` statement. Can be used as an alternative
* to the `If()`/`Else()` syntax.
*
* The corresponding TSL `select()` looks like so:
* ```js
* velocity = position.greaterThanEqual( limit ).select( velocity.negate(), velocity );
* ```
* The `select()` method is called in a chaining fashion on a condition. The parameter nodes of `select()`
* determine the outcome of the entire statement.
*
* @augments Node
*/
class ConditionalNode extends Node {
static get type() {
return 'ConditionalNode';
}
/**
* Constructs a new conditional node.
*
* @param {Node} condNode - The node that defines the condition.
* @param {Node} ifNode - The node that is evaluated when the condition ends up `true`.
* @param {Node?} [elseNode=null] - The node that is evaluated when the condition ends up `false`.
*/
constructor( condNode, ifNode, elseNode = null ) {
super();
/**
* The node that defines the condition.
*
* @type {Node}
*/
this.condNode = condNode;
/**
* The node that is evaluated when the condition ends up `true`.
*
* @type {Node}
*/
this.ifNode = ifNode;
/**
* The node that is evaluated when the condition ends up `false`.
*
* @type {Node?}
* @default null
*/
this.elseNode = elseNode;
}
/**
* This method is overwritten since the node type is inferred from the if/else
* nodes.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The node type.
*/
getNodeType( builder ) {
const { ifNode, elseNode } = builder.getNodeProperties( this );
if ( ifNode === undefined ) {
// fallback setup
this.setup( builder );
return this.getNodeType( builder );
}
const ifType = ifNode.getNodeType( builder );
if ( elseNode !== null ) {
// pick the wider of the two branch types so both results fit
const elseType = elseNode.getNodeType( builder );
if ( builder.getTypeLength( elseType ) > builder.getTypeLength( ifType ) ) {
return elseType;
}
}
return ifType;
}
setup( builder ) {
// cache the sub-nodes so repeated builds reuse the same results
const condNode = this.condNode.cache();
const ifNode = this.ifNode.cache();
const elseNode = this.elseNode ? this.elseNode.cache() : null;
//
// remember the enclosing node block so the branches can be associated with it
const currentNodeBlock = builder.context.nodeBlock;
builder.getDataFromNode( ifNode ).parentNodeBlock = currentNodeBlock;
if ( elseNode !== null ) builder.getDataFromNode( elseNode ).parentNodeBlock = currentNodeBlock;
//
const properties = builder.getNodeProperties( this );
properties.condNode = condNode;
properties.ifNode = ifNode.context( { nodeBlock: ifNode } );
properties.elseNode = elseNode ? elseNode.context( { nodeBlock: elseNode } ) : null;
}
generate( builder, output ) {
const type = this.getNodeType( builder );
const nodeData = builder.getDataFromNode( this );
// already generated — reuse the existing output property
if ( nodeData.nodeProperty !== undefined ) {
return nodeData.nodeProperty;
}
const { condNode, ifNode, elseNode } = builder.getNodeProperties( this );
// when a value is expected, declare a property that both branches assign to
const needsOutput = output !== 'void';
const nodeProperty = needsOutput ? property( type ).build( builder ) : '';
nodeData.nodeProperty = nodeProperty;
const nodeSnippet = condNode.build( builder, 'bool' );
builder.addFlowCode( `\n${ builder.tab }if ( ${ nodeSnippet } ) {\n\n` ).addFlowTab();
let ifSnippet = ifNode.build( builder, type );
if ( ifSnippet ) {
if ( needsOutput ) {
ifSnippet = nodeProperty + ' = ' + ifSnippet + ';';
} else {
// in a void context the branch result is returned directly
ifSnippet = 'return ' + ifSnippet + ';';
}
}
builder.removeFlowTab().addFlowCode( builder.tab + '\t' + ifSnippet + '\n\n' + builder.tab + '}' );
if ( elseNode !== null ) {
builder.addFlowCode( ' else {\n\n' ).addFlowTab();
let elseSnippet = elseNode.build( builder, type );
if ( elseSnippet ) {
if ( needsOutput ) {
elseSnippet = nodeProperty + ' = ' + elseSnippet + ';';
} else {
elseSnippet = 'return ' + elseSnippet + ';';
}
}
builder.removeFlowTab().addFlowCode( builder.tab + '\t' + elseSnippet + '\n\n' + builder.tab + '}\n\n' );
} else {
builder.addFlowCode( '\n\n' );
}
return builder.format( nodeProperty, type, output );
}
}
/**
* TSL function for creating a conditional node.
*
* @function
* @param {Node} condNode - The node that defines the condition.
* @param {Node} ifNode - The node that is evaluated when the condition ends up `true`.
* @param {Node?} [elseNode=null] - The node that is evaluated when the condition ends up `false`.
* @returns {ConditionalNode}
*/
const select = /*@__PURE__*/ nodeProxy( ConditionalNode );
addMethodChaining( 'select', select );
// Deprecated
/**
* @function
* @deprecated since r168. Use {@link select} instead.
*
* @param {...any} params
* @returns {ConditionalNode}
*/
const cond = ( ...params ) => { // @deprecated, r168
console.warn( 'TSL.ConditionalNode: cond() has been renamed to select().' );
return select( ...params );
};
addMethodChaining( 'cond', cond );
/** @module ContextNode **/
/**
 * A node that acts as a context management component for another node.
 * {@link NodeBuilder} performs its node building process in a specific context
 * and this node allows to modify that context temporarily while the wrapped
 * node is built. A typical use case is to overwrite `getUV()` e.g.:
 *
 * ```js
 * node.context( { getUV: () => customCoord } );
 * ```
 * @augments Node
 */
class ContextNode extends Node {
	static get type() {
		return 'ContextNode';
	}
	/**
	 * Constructs a new context node.
	 *
	 * @param {Node} node - The node whose context should be modified.
	 * @param {Object} [value={}] - The modified context data.
	 */
	constructor( node, value = {} ) {
		super();
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isContextNode = true;
		/**
		 * The node whose context should be modified.
		 *
		 * @type {Node}
		 */
		this.node = node;
		/**
		 * The modified context data.
		 *
		 * @type {Object}
		 * @default {}
		 */
		this.value = value;
	}
	/**
	 * Delegates to the wrapped node so scope resolution sees through the wrapper.
	 *
	 * @return {Node} A reference to the scope of the wrapped node.
	 */
	getScope() {
		return this.node.getScope();
	}
	/**
	 * Delegates to the wrapped node so this node reports the same type.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {
		return this.node.getNodeType( builder );
	}
	analyze( builder ) {
		this.node.build( builder );
	}
	setup( builder ) {
		// swap in the merged context only for the duration of the build
		const savedContext = builder.getContext();
		builder.setContext( Object.assign( {}, builder.context, this.value ) );
		const result = this.node.build( builder );
		builder.setContext( savedContext );
		return result;
	}
	generate( builder, output ) {
		// same save/merge/restore pattern as setup(), but produces a code snippet
		const savedContext = builder.getContext();
		builder.setContext( Object.assign( {}, builder.context, this.value ) );
		const snippet = this.node.build( builder, output );
		builder.setContext( savedContext );
		return snippet;
	}
}
/**
* TSL function for creating a context node.
*
* @function
* @param {Node} node - The node whose context should be modified.
* @param {Object} [value={}] - The modified context data.
* @returns {ContextNode}
*/
const context = /*@__PURE__*/ nodeProxy( ContextNode );
/**
* TSL function for defining a label context value for a given node.
*
* @function
* @param {Node} node - The node whose context should be modified.
* @param {String} name - The name/label to set.
* @returns {ContextNode}
*/
const label = ( node, name ) => context( node, { label: name } );
addMethodChaining( 'context', context );
addMethodChaining( 'label', label );
/** @module VarNode **/
/**
* Class for representing shader variables as nodes. Variables are created from
* existing nodes like the following:
*
* ```js
* const depth = sampleDepth( uvNode ).toVar( 'depth' );
* ```
*
* @augments Node
*/
class VarNode extends Node {
static get type() {
return 'VarNode';
}
/**
* Constructs a new variable node.
*
* @param {Node} node - The node for which a variable should be created.
* @param {String?} name - The name of the variable in the shader.
* @param {Boolean?} readOnly - The read-only flag.
*/
constructor( node, name = null, readOnly = false ) {
super();
/**
* The node for which a variable should be created.
*
* @type {Node}
*/
this.node = node;
/**
* The name of the variable in the shader. If no name is defined,
* the node system auto-generates one.
*
* @type {String?}
* @default null
*/
this.name = name;
/**
* `VarNode` sets this property to `true` by default.
*
* @type {Boolean}
* @default true
*/
this.global = true;
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isVarNode = true;
/**
*
* The read-only flag.
*
* @type {Boolean}
* @default false
*/
this.readOnly = readOnly;
}
// a named variable hashes by its name so equally named vars are shared
getHash( builder ) {
return this.name || super.getHash( builder );
}
getMemberType( builder, name ) {
return this.node.getMemberType( builder, name );
}
getElementType( builder ) {
return this.node.getElementType( builder );
}
getNodeType( builder ) {
return this.node.getNodeType( builder );
}
generate( builder ) {
const { node, name, readOnly } = this;
const { renderer } = builder;
const isWebGPUBackend = renderer.backend.isWebGPUBackend === true;
let isDeterministic = false;
let shouldTreatAsReadOnly = false;
if ( readOnly ) {
isDeterministic = builder.isDeterministic( node );
// the WebGPU backend can always honor the read-only request (emitted as
// `const` or `let` below); other backends only do so for deterministic values
shouldTreatAsReadOnly = isWebGPUBackend ? readOnly : isDeterministic;
}
const vectorType = builder.getVectorType( this.getNodeType( builder ) );
const snippet = node.build( builder, vectorType );
const nodeVar = builder.getVarFromNode( this, name, vectorType, undefined, shouldTreatAsReadOnly );
const propertyName = builder.getPropertyName( nodeVar );
let declarationPrefix = propertyName;
if ( shouldTreatAsReadOnly ) {
if ( isWebGPUBackend ) {
// WGSL: `const` requires a compile-time-deterministic initializer, `let` does not
declarationPrefix = isDeterministic
? `const ${ propertyName }`
: `let ${ propertyName }`;
} else {
const count = builder.getArrayCount( node );
declarationPrefix = `const ${ builder.getVar( nodeVar.type, propertyName, count ) }`;
}
}
builder.addLineFlowCode( `${ declarationPrefix } = ${ snippet }`, this );
return propertyName;
}
}
/**
* TSL function for creating a var node.
*
* @function
* @param {Node} node - The node for which a variable should be created.
* @param {String?} name - The name of the variable in the shader.
* @returns {VarNode}
*/
const createVar = /*@__PURE__*/ nodeProxy( VarNode );
/**
* TSL function for creating a var node and appending it to the current flow.
*
* @function
* @param {Node} node - The node for which a variable should be created.
* @param {String?} name - The name of the variable in the shader.
* @returns {VarNode}
*/
const Var = ( node, name = null ) => createVar( node, name ).append();
/**
* TSL function for creating a const node (a read-only variable) and appending
* it to the current flow.
*
* @function
* @param {Node} node - The node for which a constant should be created.
* @param {String?} name - The name of the constant in the shader.
* @returns {VarNode}
*/
const Const = ( node, name = null ) => createVar( node, name, true ).append();
// Method chaining
addMethodChaining( 'toVar', Var );
addMethodChaining( 'toConst', Const );
// Deprecated
/**
* @function
* @deprecated since r170. Use `Var( node )` or `node.toVar()` instead.
*
* @param {Any} node
* @returns {VarNode}
*/
const temp = ( node ) => { // @deprecated, r170
console.warn( 'TSL: "temp( node )" is deprecated. Use "Var( node )" or "node.toVar()" instead.' );
return createVar( node );
};
addMethodChaining( 'temp', temp );
/** @module VaryingNode **/
/**
* Class for representing shader varyings as nodes. Varyings are created from
* existing nodes like the following:
*
* ```js
* const positionLocal = positionGeometry.toVarying( 'vPositionLocal' );
* ```
*
* @augments Node
*/
class VaryingNode extends Node {
static get type() {
return 'VaryingNode';
}
/**
* Constructs a new varying node.
*
* @param {Node} node - The node for which a varying should be created.
* @param {String?} name - The name of the varying in the shader.
*/
constructor( node, name = null ) {
super();
/**
* The node for which a varying should be created.
*
* @type {Node}
*/
this.node = node;
/**
* The name of the varying in the shader. If no name is defined,
* the node system auto-generates one.
*
* @type {String?}
* @default null
*/
this.name = name;
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isVaryingNode = true;
}
/**
* The method is overwritten so it always returns `true`.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Boolean} Whether this node is global or not.
*/
isGlobal( /*builder*/ ) {
return true;
}
// a named varying hashes by its name so equally named varyings are shared
getHash( builder ) {
return this.name || super.getHash( builder );
}
getNodeType( builder ) {
// VaryingNode is auto type
return this.node.getNodeType( builder );
}
/**
* This method performs the setup of a varying node with the current node builder.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {NodeVarying} The node varying from the node builder.
*/
setupVarying( builder ) {
const properties = builder.getNodeProperties( this );
let varying = properties.varying;
if ( varying === undefined ) {
const name = this.name;
const type = this.getNodeType( builder );
properties.varying = varying = builder.getVaryingFromNode( this, name, type );
properties.node = this.node;
}
// this property can be used to check if the varying can be optimized for a variable
varying.needsInterpolation || ( varying.needsInterpolation = ( builder.shaderStage === 'fragment' ) );
return varying;
}
setup( builder ) {
this.setupVarying( builder );
}
analyze( builder ) {
this.setupVarying( builder );
return this.node.analyze( builder );
}
generate( builder ) {
const properties = builder.getNodeProperties( this );
const varying = this.setupVarying( builder );
const needsReassign = builder.shaderStage === 'fragment' && properties.reassignPosition === true && builder.context.needsPositionReassign;
if ( properties.propertyName === undefined || needsReassign ) {
const type = this.getNodeType( builder );
const propertyName = builder.getPropertyName( varying, NodeShaderStage.VERTEX );
// force node run in vertex stage
builder.flowNodeFromShaderStage( NodeShaderStage.VERTEX, this.node, type, propertyName );
properties.propertyName = propertyName;
if ( needsReassign ) {
// once reassign varying in fragment stage
properties.reassignPosition = false;
} else if ( properties.reassignPosition === undefined && builder.context.isPositionNodeInput ) {
properties.reassignPosition = true;
}
}
return builder.getPropertyName( varying );
}
}
/**
* TSL function for creating a varying node.
*
* @function
* @param {Node} node - The node for which a varying should be created.
* @param {String?} name - The name of the varying in the shader.
* @returns {VaryingNode}
*/
const varying = /*@__PURE__*/ nodeProxy( VaryingNode );
/**
* Computes a node in the vertex stage.
*
* @function
* @param {Node} node - The node which should be executed in the vertex stage.
* @returns {VaryingNode}
*/
const vertexStage = ( node ) => varying( node );
addMethodChaining( 'toVarying', varying );
addMethodChaining( 'toVertexStage', vertexStage );
// Deprecated
addMethodChaining( 'varying', ( ...params ) => { // @deprecated, r173
console.warn( 'TSL.VaryingNode: .varying() has been renamed to .toVarying().' );
return varying( ...params );
} );
addMethodChaining( 'vertexStage', ( ...params ) => { // @deprecated, r173
console.warn( 'TSL.VaryingNode: .vertexStage() has been renamed to .toVertexStage().' );
return varying( ...params );
} );
/** @module ColorSpaceFunctions **/
/**
* Converts the given color value from sRGB to linear-sRGB color space
* (the sRGB electro-optical transfer function, EOTF).
*
* @method
* @param {Node<vec3>} color - The sRGB color.
* @return {Node<vec3>} The linear-sRGB color.
*/
const sRGBTransferEOTF = /*@__PURE__*/ Fn( ( [ color ] ) => {
// piecewise sRGB decode: gamma segment ((c + 0.055) / 1.055)^2.4 ...
// (0.9478672986 = 1 / 1.055, 0.0521327014 = 0.055 / 1.055)
const a = color.mul( 0.9478672986 ).add( 0.0521327014 ).pow( 2.4 );
// ... and linear segment c / 12.92 (0.0773993808 = 1 / 12.92)
const b = color.mul( 0.0773993808 );
// mix() selects per channel: linear segment at or below the 0.04045 breakpoint
const factor = color.lessThanEqual( 0.04045 );
const rgbResult = mix( a, b, factor );
return rgbResult;
} ).setLayout( {
name: 'sRGBTransferEOTF',
type: 'vec3',
inputs: [
{ name: 'color', type: 'vec3' }
]
} );
/**
* Converts the given color value from linear-sRGB to sRGB color space
* (the sRGB opto-electronic transfer function, OETF).
*
* @method
* @param {Node<vec3>} color - The linear-sRGB color.
* @return {Node<vec3>} The sRGB color.
*/
const sRGBTransferOETF = /*@__PURE__*/ Fn( ( [ color ] ) => {
// piecewise sRGB encode: gamma segment 1.055 * c^(1/2.4) - 0.055 (0.41666 ~= 1 / 2.4) ...
const a = color.pow( 0.41666 ).mul( 1.055 ).sub( 0.055 );
// ... and linear segment 12.92 * c
const b = color.mul( 12.92 );
// mix() selects per channel: linear segment at or below the 0.0031308 breakpoint
const factor = color.lessThanEqual( 0.0031308 );
const rgbResult = mix( a, b, factor );
return rgbResult;
} ).setLayout( {
name: 'sRGBTransferOETF',
type: 'vec3',
inputs: [
{ name: 'color', type: 'vec3' }
]
} );
/** @module ColorSpaceNode **/
// sentinel color-space names resolved lazily in resolveColorSpace()
const WORKING_COLOR_SPACE = 'WorkingColorSpace';
const OUTPUT_COLOR_SPACE = 'OutputColorSpace';
/**
* This node represents a color space conversion. Meaning it converts
* a color value from a source to a target color space.
*
* @augments TempNode
*/
class ColorSpaceNode extends TempNode {
static get type() {
return 'ColorSpaceNode';
}
/**
* Constructs a new color space node.
*
* @param {Node} colorNode - Represents the color to convert.
* @param {String} source - The source color space.
* @param {String} target - The target color space.
*/
constructor( colorNode, source, target ) {
super( 'vec4' );
/**
* Represents the color to convert.
*
* @type {Node}
*/
this.colorNode = colorNode;
/**
* The source color space.
*
* @type {String}
*/
this.source = source;
/**
* The target color space.
*
* @type {String}
*/
this.target = target;
}
/**
* This method resolves the constants `WORKING_COLOR_SPACE` and
* `OUTPUT_COLOR_SPACE` based on the current configuration of the
* color management and renderer.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {String} colorSpace - The color space to resolve.
* @return {String} The resolved color space.
*/
resolveColorSpace( builder, colorSpace ) {
if ( colorSpace === WORKING_COLOR_SPACE ) {
return ColorManagement.workingColorSpace;
} else if ( colorSpace === OUTPUT_COLOR_SPACE ) {
return builder.context.outputColorSpace || builder.renderer.outputColorSpace;
}
return colorSpace;
}
setup( builder ) {
const { colorNode } = this;
const source = this.resolveColorSpace( builder, this.source );
const target = this.resolveColorSpace( builder, this.target );
let outputNode = colorNode;
// no-op when color management is disabled or the conversion is trivial
if ( ColorManagement.enabled === false || source === target || ! source || ! target ) {
return outputNode;
}
// conversion pipeline: decode source transfer -> convert primaries -> encode target transfer
if ( ColorManagement.getTransfer( source ) === SRGBTransfer ) {
outputNode = vec4( sRGBTransferEOTF( outputNode.rgb ), outputNode.a );
}
if ( ColorManagement.getPrimaries( source ) !== ColorManagement.getPrimaries( target ) ) {
outputNode = vec4(
mat3( ColorManagement._getMatrix( new Matrix3(), source, target ) ).mul( outputNode.rgb ),
outputNode.a
);
}
if ( ColorManagement.getTransfer( target ) === SRGBTransfer ) {
outputNode = vec4( sRGBTransferOETF( outputNode.rgb ), outputNode.a );
}
return outputNode;
}
}
/**
* TSL function for converting a given color node to the current output color space.
*
* @function
* @param {Node} node - Represents the node to convert.
* @returns {ColorSpaceNode}
*/
const toOutputColorSpace = ( node ) => nodeObject( new ColorSpaceNode( nodeObject( node ), WORKING_COLOR_SPACE, OUTPUT_COLOR_SPACE ) );
/**
* TSL function for converting a given color node to the current working color space.
*
* @function
* @param {Node} node - Represents the node to convert.
* @returns {ColorSpaceNode}
*/
const toWorkingColorSpace = ( node ) => nodeObject( new ColorSpaceNode( nodeObject( node ), OUTPUT_COLOR_SPACE, WORKING_COLOR_SPACE ) );
/**
* TSL function for converting a given color node from the current working color space to the given color space.
*
* @function
* @param {Node} node - Represents the node to convert.
* @param {String} colorSpace - The target color space.
* @returns {ColorSpaceNode}
*/
const workingToColorSpace = ( node, colorSpace ) => nodeObject( new ColorSpaceNode( nodeObject( node ), WORKING_COLOR_SPACE, colorSpace ) );
/**
* TSL function for converting a given color node from the given color space to the current working color space.
*
* @function
* @param {Node} node - Represents the node to convert.
* @param {String} colorSpace - The source color space.
* @returns {ColorSpaceNode}
*/
const colorSpaceToWorking = ( node, colorSpace ) => nodeObject( new ColorSpaceNode( nodeObject( node ), colorSpace, WORKING_COLOR_SPACE ) );
/**
* TSL function for converting a given color node from one color space to another one.
*
* @function
* @param {Node} node - Represents the node to convert.
* @param {String} sourceColorSpace - The source color space.
* @param {String} targetColorSpace - The target color space.
* @returns {ColorSpaceNode}
*/
const convertColorSpace = ( node, sourceColorSpace, targetColorSpace ) => nodeObject( new ColorSpaceNode( nodeObject( node ), sourceColorSpace, targetColorSpace ) );
addMethodChaining( 'toOutputColorSpace', toOutputColorSpace );
addMethodChaining( 'toWorkingColorSpace', toWorkingColorSpace );
addMethodChaining( 'workingToColorSpace', workingToColorSpace );
addMethodChaining( 'colorSpaceToWorking', colorSpaceToWorking );
// TODO: Avoid duplicated code and use only ReferenceBaseNode or ReferenceNode
/** @module ReferenceBaseNode **/
/**
* This class is only relevant if the referenced property is array-like.
* In this case, `ReferenceElementNode` allows to refer to a specific
* element inside the data structure via an index.
*
* @augments ArrayElementNode
*/
let ReferenceElementNode$1 = class ReferenceElementNode extends ArrayElementNode {
static get type() {
return 'ReferenceElementNode';
}
/**
* Constructs a new reference element node.
*
* @param {ReferenceBaseNode?} referenceNode - The reference node.
* @param {Node} indexNode - The index node that defines the element access.
*/
constructor( referenceNode, indexNode ) {
super( referenceNode, indexNode );
/**
* Similar to {@link module:ReferenceBaseNode~ReferenceBaseNode#reference}, an
* additional reference to the reference node that owns this element.
*
* @type {ReferenceBaseNode?}
* @default null
*/
this.referenceNode = referenceNode;
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isReferenceElementNode = true;
}
/**
* This method is overwritten since the node type is inferred from
* the uniform type of the reference node.
*
* @return {String} The node type.
*/
getNodeType() {
return this.referenceNode.uniformType;
}
generate( builder ) {
const snippet = super.generate( builder );
// convert the array's element snippet to this node's (element) type
const arrayType = this.referenceNode.getNodeType();
const elementType = this.getNodeType();
return builder.format( snippet, arrayType, elementType );
}
};
/**
* Base class for nodes which establishes a reference to a property of another object.
* In this way, the value of the node is automatically linked to the value of
* referenced object. Reference nodes internally represent the linked value
* as a uniform.
*
* @augments Node
*/
class ReferenceBaseNode extends Node {
static get type() {
return 'ReferenceBaseNode';
}
/**
 * Constructs a new reference base node.
 *
 * @param {String} property - The name of the property the node refers to.
 * @param {String} uniformType - The uniform type that should be used to represent the property value.
 * @param {Object?} [object=null] - The object the property belongs to.
 * @param {Number?} [count=null] - When the linked property is an array-like, this parameter defines its length.
 */
constructor( property, uniformType, object = null, count = null ) {
super();
/**
 * The name of the property the node refers to.
 *
 * @type {String}
 */
this.property = property;
/**
 * The uniform type that should be used to represent the property value.
 *
 * @type {String}
 */
this.uniformType = uniformType;
/**
 * The object the property belongs to.
 *
 * @type {Object?}
 * @default null
 */
this.object = object;
/**
 * When the linked property is an array, this parameter defines its length.
 *
 * @type {Number?}
 * @default null
 */
this.count = count;
/**
 * The property name might have dots so nested properties can be referred.
 * The hierarchy of the names is stored inside this array.
 *
 * @type {Array<String>}
 */
this.properties = property.split( '.' );
/**
 * Points to the current referred object. This property exists next to {@link module:ReferenceNode~ReferenceNode#object}
 * since the final reference might be updated from calling code.
 *
 * @type {Object?}
 * @default null
 */
this.reference = object;
/**
 * The uniform node that holds the value of the reference node.
 * Created lazily in `setNodeType()` on first access.
 *
 * @type {UniformNode}
 * @default null
 */
this.node = null;
/**
 * The uniform group of the internal uniform.
 *
 * @type {UniformGroupNode}
 * @default null
 */
this.group = null;
/**
 * Overwritten since reference nodes are updated per object.
 *
 * @type {String}
 * @default 'object'
 */
this.updateType = NodeUpdateType.OBJECT;
}
/**
 * Sets the uniform group for this reference node.
 *
 * @param {UniformGroupNode} group - The uniform group to set.
 * @return {ReferenceBaseNode} A reference to this node.
 */
setGroup( group ) {
this.group = group;
return this;
}
/**
 * When the referred property is array-like, this method can be used
 * to access elements via an index node.
 *
 * @param {IndexNode} indexNode - indexNode.
 * @return {ReferenceElementNode} A reference to an element.
 */
element( indexNode ) {
return nodeObject( new ReferenceElementNode$1( this, nodeObject( indexNode ) ) );
}
/**
 * Sets the node type which automatically defines the internal
 * uniform type.
 *
 * @param {String} uniformType - The type to set.
 */
setNodeType( uniformType ) {
const node = uniform( null, uniformType ).getSelf();
if ( this.group !== null ) {
node.setGroup( this.group );
}
this.node = node;
}
/**
 * This method is overwritten since the node type is inferred from
 * the type of the reference node.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The node type.
 */
getNodeType( builder ) {
// lazily create the internal uniform before its type is queried
if ( this.node === null ) {
this.updateReference( builder );
this.updateValue();
}
return this.node.getNodeType( builder );
}
/**
 * Returns the property value from the given referred object.
 *
 * @param {Object} [object=this.reference] - The object to retrieve the property value from.
 * @return {Any} The value.
 */
getValueFromReference( object = this.reference ) {
const { properties } = this;
// walk the dotted property path, e.g. 'material.color'
let value = object[ properties[ 0 ] ];
for ( let i = 1; i < properties.length; i ++ ) {
value = value[ properties[ i ] ];
}
return value;
}
/**
 * Allows to update the reference based on the given state. The state is only
 * evaluated when {@link module:ReferenceBaseNode~ReferenceBaseNode#object} is not set.
 *
 * @param {(NodeFrame|NodeBuilder)} state - The current state.
 * @return {Object} The updated reference.
 */
updateReference( state ) {
this.reference = this.object !== null ? this.object : state.object;
return this.reference;
}
/**
 * The output of the reference node is the internal uniform node.
 *
 * @return {UniformNode} The output node.
 */
setup() {
this.updateValue();
return this.node;
}
/**
 * Overwritten to update the internal uniform value.
 *
 * @param {NodeFrame} frame - A reference to the current node frame.
 */
update( /*frame*/ ) {
this.updateValue();
}
/**
 * Retrieves the value from the referred object property and uses it
 * to update the internal uniform.
 */
updateValue() {
if ( this.node === null ) this.setNodeType( this.uniformType );
const value = this.getValueFromReference();
// array-like values are assigned to the uniform's array, single values to its value
if ( Array.isArray( value ) ) {
this.node.array = value;
} else {
this.node.value = value;
}
}
}
/**
 * TSL function for creating a reference base node.
 *
 * @function
 * @param {String} name - The name of the property the node refers to.
 * @param {String} type - The uniform type that should be used to represent the property value.
 * @param {Object} object - The object the property belongs to.
 * @returns {ReferenceBaseNode}
 */
const reference$1 = ( name, type, object ) => {

	return nodeObject( new ReferenceBaseNode( name, type, object ) );

};
/** @module RendererReferenceNode **/
/**
 * This node is a special type of reference node which is intended
 * for linking renderer properties with node values.
 * ```js
 * const exposureNode = rendererReference( 'toneMappingExposure', 'float', renderer );
 * ```
 * When changing `renderer.toneMappingExposure`, the node value of `exposureNode` will
 * automatically be updated.
 *
 * @augments ReferenceBaseNode
 */
class RendererReferenceNode extends ReferenceBaseNode {

	static get type() {

		return 'RendererReferenceNode';

	}

	/**
	 * Constructs a new renderer reference node.
	 *
	 * @param {String} property - The name of the property the node refers to.
	 * @param {String} inputType - The uniform type that should be used to represent the property value.
	 * @param {Renderer?} [renderer=null] - The renderer the property belongs to. When no renderer is set,
	 * the node refers to the renderer of the current state.
	 */
	constructor( property, inputType, renderer = null ) {

		super( property, inputType, renderer );

		/**
		 * The renderer the property belongs to. When no renderer is set,
		 * the node refers to the renderer of the current state.
		 *
		 * @type {Renderer?}
		 * @default null
		 */
		this.renderer = renderer;

		this.setGroup( renderGroup );

	}

	/**
	 * Updates the reference based on the given state. The state is only evaluated
	 * when {@link module:RendererReferenceNode~RendererReferenceNode#renderer} is not set.
	 *
	 * @param {(NodeFrame|NodeBuilder)} state - The current state.
	 * @return {Object} The updated reference.
	 */
	updateReference( state ) {

		if ( this.renderer !== null ) {

			this.reference = this.renderer;

		} else {

			this.reference = state.renderer;

		}

		return this.reference;

	}

}
/**
 * TSL function for creating a renderer reference node.
 *
 * @function
 * @param {String} name - The name of the property the node refers to.
 * @param {String} type - The uniform type that should be used to represent the property value.
 * @param {Renderer?} [renderer=null] - The renderer the property belongs to. When no renderer is set,
 * the node refers to the renderer of the current state.
 * @returns {RendererReferenceNode}
 */
const rendererReference = ( name, type, renderer = null ) => {

	return nodeObject( new RendererReferenceNode( name, type, renderer ) );

};
/** @module ToneMappingNode **/
/**
 * This node represents a tone mapping operation.
 *
 * @augments TempNode
 */
class ToneMappingNode extends TempNode {

	static get type() {

		return 'ToneMappingNode';

	}

	/**
	 * Constructs a new tone mapping node.
	 *
	 * @param {Number} toneMapping - The tone mapping type.
	 * @param {Node} [exposureNode=toneMappingExposure] - The tone mapping exposure.
	 * @param {Node} [colorNode=null] - The color node to process.
	 */
	constructor( toneMapping, exposureNode = toneMappingExposure, colorNode = null ) {

		super( 'vec3' );

		/**
		 * The tone mapping type.
		 *
		 * @type {Number}
		 */
		this.toneMapping = toneMapping;

		/**
		 * The tone mapping exposure.
		 *
		 * @type {Node}
		 * @default toneMappingExposure
		 */
		this.exposureNode = exposureNode;

		/**
		 * Represents the color to process.
		 *
		 * @type {Node?}
		 * @default null
		 */
		this.colorNode = colorNode;

	}

	/**
	 * Overwrites the default `customCacheKey()` implementation by including the tone
	 * mapping type into the cache key.
	 *
	 * @return {Number} The hash.
	 */
	customCacheKey() {

		return hash$1( this.toneMapping );

	}

	setup( builder ) {

		const colorNode = this.colorNode || builder.context.color;
		const toneMapping = this.toneMapping;

		// no tone mapping configured, pass the color through unchanged
		if ( toneMapping === NoToneMapping ) return colorNode;

		const toneMappingFn = builder.renderer.library.getToneMappingFunction( toneMapping );

		if ( toneMappingFn === null ) {

			console.error( 'ToneMappingNode: Unsupported Tone Mapping configuration.', toneMapping );
			return colorNode;

		}

		// tone map the rgb channels only, alpha is preserved
		return vec4( toneMappingFn( colorNode.rgb, this.exposureNode ), colorNode.a );

	}

}
/**
 * TSL function for creating a tone mapping node.
 *
 * @function
 * @param {Number} mapping - The tone mapping type.
 * @param {Node<float> | Number} exposure - The tone mapping exposure.
 * @param {Node<vec3> | Color} color - The color node to process.
 * @returns {ToneMappingNode<vec3>}
 */
const toneMapping = ( mapping, exposure, color ) => {

	return nodeObject( new ToneMappingNode( mapping, nodeObject( exposure ), nodeObject( color ) ) );

};
/**
 * TSL object that represents the global tone mapping exposure of the renderer.
 *
 * @type {RendererReferenceNode<float>}
 */
const toneMappingExposure = /*@__PURE__*/ rendererReference( 'toneMappingExposure', 'float' );
// chainable form: `color.toneMapping( mapping, exposure )`
addMethodChaining( 'toneMapping', ( color, mapping, exposure ) => toneMapping( mapping, exposure, color ) );
/** @module BufferAttributeNode **/
/**
 * In earlier `three.js` versions it was only possible to define attribute data
 * on geometry level. With `BufferAttributeNode`, it is also possible to do this
 * on the node level.
 * ```js
 * const geometry = new THREE.PlaneGeometry();
 * const positionAttribute = geometry.getAttribute( 'position' );
 *
 * const colors = [];
 * for ( let i = 0; i < position.count; i ++ ) {
 * colors.push( 1, 0, 0 );
 * }
 *
 * material.colorNode = bufferAttribute( new THREE.Float32BufferAttribute( colors, 3 ) );
 * ```
 * This new approach is especially interesting when geometry data are generated via
 * compute shaders. The below line converts a storage buffer into an attribute node.
 * ```js
 * material.positionNode = positionBuffer.toAttribute();
 * ```
 * @augments InputNode
 */
class BufferAttributeNode extends InputNode {
static get type() {
return 'BufferAttributeNode';
}
/**
 * Constructs a new buffer attribute node.
 *
 * @param {BufferAttribute|InterleavedBuffer|TypedArray} value - The attribute data.
 * @param {String?} [bufferType=null] - The buffer type (e.g. `'vec3'`).
 * @param {Number} [bufferStride=0] - The buffer stride.
 * @param {Number} [bufferOffset=0] - The buffer offset.
 */
constructor( value, bufferType = null, bufferStride = 0, bufferOffset = 0 ) {
super( value, bufferType );
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isBufferNode = true;
/**
 * The buffer type (e.g. `'vec3'`). When `null`, the type is inferred
 * from the attribute in `getNodeType()`.
 *
 * @type {String}
 * @default null
 */
this.bufferType = bufferType;
/**
 * The buffer stride.
 *
 * @type {Number}
 * @default 0
 */
this.bufferStride = bufferStride;
/**
 * The buffer offset.
 *
 * @type {Number}
 * @default 0
 */
this.bufferOffset = bufferOffset;
/**
 * The usage property. Set this to `THREE.DynamicDrawUsage` via `.setUsage()`,
 * if you are planning to update the attribute data per frame.
 *
 * @type {Number}
 * @default StaticDrawUsage
 */
this.usage = StaticDrawUsage;
/**
 * Whether the attribute is instanced or not.
 *
 * @type {Boolean}
 * @default false
 */
this.instanced = false;
/**
 * A reference to the buffer attribute.
 *
 * @type {BufferAttribute?}
 * @default null
 */
this.attribute = null;
/**
 * `BufferAttributeNode` sets this property to `true` by default.
 *
 * @type {Boolean}
 * @default true
 */
this.global = true;
// when a BufferAttribute is passed in directly, adopt its settings
if ( value && value.isBufferAttribute === true ) {
this.attribute = value;
this.usage = value.usage;
this.instanced = value.isInstancedBufferAttribute;
}
}
/**
 * This method is overwritten since the attribute data might be shared
 * and thus the hash should be shared as well.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The hash.
 */
getHash( builder ) {
if ( this.bufferStride === 0 && this.bufferOffset === 0 ) {
// without a custom layout, nodes with the same data share the hash
// of the first node registered for that data
let bufferData = builder.globalCache.getData( this.value );
if ( bufferData === undefined ) {
bufferData = {
node: this
};
builder.globalCache.setData( this.value, bufferData );
}
return bufferData.node.uuid;
}
return this.uuid;
}
/**
 * This method is overwritten since the node type is inferred from
 * the buffer attribute.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The node type.
 */
getNodeType( builder ) {
if ( this.bufferType === null ) {
this.bufferType = builder.getTypeFromAttribute( this.attribute );
}
return this.bufferType;
}
/**
 * Depending on which value was passed to the node, `setup()` behaves
 * differently. If no instance of `BufferAttribute` was passed, the method
 * creates an internal attribute and configures it respectively.
 *
 * @param {NodeBuilder} builder - The current node builder.
 */
setup( builder ) {
if ( this.attribute !== null ) return;
const type = this.getNodeType( builder );
const array = this.value;
const itemSize = builder.getTypeLength( type );
const stride = this.bufferStride || itemSize;
const offset = this.bufferOffset;
// plain typed arrays are wrapped in an interleaved buffer so stride/offset can apply
const buffer = array.isInterleavedBuffer === true ? array : new InterleavedBuffer( array, stride );
const bufferAttribute = new InterleavedBufferAttribute( buffer, itemSize, offset );
buffer.setUsage( this.usage );
this.attribute = bufferAttribute;
this.attribute.isInstancedBufferAttribute = this.instanced; // @TODO: Add a possible: InstancedInterleavedBufferAttribute
}
/**
 * Generates the code snippet of the buffer attribute node.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The generated code snippet.
 */
generate( builder ) {
const nodeType = this.getNodeType( builder );
const nodeAttribute = builder.getBufferAttributeFromNode( this, nodeType );
const propertyName = builder.getPropertyName( nodeAttribute );
let output = null;
// attributes are only directly accessible in the vertex/compute stage;
// other stages receive the value through a varying
if ( builder.shaderStage === 'vertex' || builder.shaderStage === 'compute' ) {
this.name = propertyName;
output = propertyName;
} else {
const nodeVarying = varying( this );
output = nodeVarying.build( builder, nodeType );
}
return output;
}
/**
 * Overwrites the default implementation to return a fixed value `'bufferAttribute'`.
 *
 * @return {String} The input type.
 */
getInputType( /*builder*/ ) {
return 'bufferAttribute';
}
/**
 * Sets the `usage` property to the given value.
 *
 * @param {Number} value - The usage to set.
 * @return {BufferAttributeNode} A reference to this node.
 */
setUsage( value ) {
this.usage = value;
if ( this.attribute && this.attribute.isBufferAttribute === true ) {
this.attribute.usage = value;
}
return this;
}
/**
 * Sets the `instanced` property to the given value.
 *
 * @param {Boolean} value - The value to set.
 * @return {BufferAttributeNode} A reference to this node.
 */
setInstanced( value ) {
this.instanced = value;
return this;
}
}
/**
 * TSL function for creating a buffer attribute node.
 *
 * @function
 * @param {BufferAttribute|InterleavedBuffer|TypedArray} array - The attribute data.
 * @param {String?} [type=null] - The buffer type (e.g. `'vec3'`).
 * @param {Number} [stride=0] - The buffer stride.
 * @param {Number} [offset=0] - The buffer offset.
 * @returns {BufferAttributeNode}
 */
const bufferAttribute = ( array, type = null, stride = 0, offset = 0 ) => {

	return nodeObject( new BufferAttributeNode( array, type, stride, offset ) );

};

/**
 * TSL function for creating a buffer attribute node with dynamic draw usage.
 * Use this function if the attribute data are updated per frame.
 *
 * @function
 * @param {BufferAttribute|InterleavedBuffer|TypedArray} array - The attribute data.
 * @param {String?} [type=null] - The buffer type (e.g. `'vec3'`).
 * @param {Number} [stride=0] - The buffer stride.
 * @param {Number} [offset=0] - The buffer offset.
 * @returns {BufferAttributeNode}
 */
const dynamicBufferAttribute = ( array, type = null, stride = 0, offset = 0 ) => {

	return bufferAttribute( array, type, stride, offset ).setUsage( DynamicDrawUsage );

};

/**
 * TSL function for creating a buffer attribute node with enabled instancing.
 *
 * @function
 * @param {BufferAttribute|InterleavedBuffer|TypedArray} array - The attribute data.
 * @param {String?} [type=null] - The buffer type (e.g. `'vec3'`).
 * @param {Number} [stride=0] - The buffer stride.
 * @param {Number} [offset=0] - The buffer offset.
 * @returns {BufferAttributeNode}
 */
const instancedBufferAttribute = ( array, type = null, stride = 0, offset = 0 ) => {

	return bufferAttribute( array, type, stride, offset ).setInstanced( true );

};

/**
 * TSL function for creating a buffer attribute node with dynamic draw usage and enabled instancing.
 *
 * @function
 * @param {BufferAttribute|InterleavedBuffer|TypedArray} array - The attribute data.
 * @param {String?} [type=null] - The buffer type (e.g. `'vec3'`).
 * @param {Number} [stride=0] - The buffer stride.
 * @param {Number} [offset=0] - The buffer offset.
 * @returns {BufferAttributeNode}
 */
const instancedDynamicBufferAttribute = ( array, type = null, stride = 0, offset = 0 ) => {

	return dynamicBufferAttribute( array, type, stride, offset ).setInstanced( true );

};

// allows converting storage buffer nodes into attribute nodes via `.toAttribute()`
addMethodChaining( 'toAttribute', ( bufferNode ) => bufferAttribute( bufferNode.value ) );
/** @module ComputeNode **/
/**
 * Represents a compute pass. When processed during the update phase, the
 * node instructs the renderer to execute its compute function
 * (see {@link ComputeNode#updateBefore}).
 *
 * @augments Node
 */
class ComputeNode extends Node {
static get type() {
return 'ComputeNode';
}
/**
 * Constructs a new compute node.
 *
 * @param {Node} computeNode - The node that defines the compute function.
 * @param {Number} count - The total number of invocations to dispatch.
 * @param {Array<Number>} [workgroupSize=[64]] - The size of a single workgroup, one entry per dimension.
 */
constructor( computeNode, count, workgroupSize = [ 64 ] ) {
super( 'void' );
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isComputeNode = true;
/**
 * The node that defines the compute function.
 *
 * @type {Node}
 */
this.computeNode = computeNode;
/**
 * The total number of invocations to dispatch.
 *
 * @type {Number}
 */
this.count = count;
/**
 * The workgroup size, one entry per dimension. The product of all
 * entries is the number of invocations per workgroup.
 *
 * @type {Array<Number>}
 * @default [64]
 */
this.workgroupSize = workgroupSize;
/**
 * The number of workgroups to dispatch, derived from `count` and
 * `workgroupSize` in {@link ComputeNode#updateDispatchCount}.
 *
 * @type {Number}
 */
this.dispatchCount = 0;
/**
 * The version number of the node.
 *
 * @type {Number}
 */
this.version = 1;
/**
 * The name or label of the uniform.
 *
 * @type {String}
 * @default ''
 */
this.name = '';
/**
 * The `updateBeforeType` is set to `NodeUpdateType.OBJECT` since {@link ComputeNode#updateBefore}
 * is executed once per object by default.
 *
 * @type {String}
 * @default 'object'
 */
this.updateBeforeType = NodeUpdateType.OBJECT;
/**
 * Callback executed when the compute pass is initialized, set via
 * {@link ComputeNode#onInit}. Invoked by the renderer, not by this class.
 *
 * @type {Function}
 */
this.onInitFunction = null;
this.updateDispatchCount();
}
/**
 * Executes the `dispose` event for this node.
 */
dispose() {
this.dispatchEvent( { type: 'dispose' } );
}
/**
 * Sets the {@link ComputeNode#name} property.
 *
 * @param {String} name - The name of the uniform.
 * @return {ComputeNode} A reference to this node.
 */
label( name ) {
this.name = name;
return this;
}
/**
 * Computes {@link ComputeNode#dispatchCount} as `ceil( count / invocationsPerWorkgroup )`,
 * where `invocationsPerWorkgroup` is the product of all workgroup size entries.
 */
updateDispatchCount() {
const { count, workgroupSize } = this;
let size = workgroupSize[ 0 ];
for ( let i = 1; i < workgroupSize.length; i ++ )
size *= workgroupSize[ i ];
this.dispatchCount = Math.ceil( count / size );
}
/**
 * Sets a callback that is executed once when the compute pass is initialized.
 *
 * @param {Function} callback - The callback to execute.
 * @return {ComputeNode} A reference to this node.
 */
onInit( callback ) {
this.onInitFunction = callback;
return this;
}
/**
 * The method execute the compute for this node.
 *
 * @param {NodeFrame} frame - A reference to the current node frame.
 */
updateBefore( { renderer } ) {
renderer.compute( this );
}
/**
 * Generates the compute shader code. Only emits code in the `compute`
 * shader stage; other stages produce no output.
 *
 * @param {NodeBuilder} builder - The current node builder.
 */
generate( builder ) {
const { shaderStage } = builder;
if ( shaderStage === 'compute' ) {
const snippet = this.computeNode.build( builder, 'void' );
if ( snippet !== '' ) {
builder.addLineFlowCode( snippet, this );
}
}
}
}
/**
 * TSL function for creating a compute node.
 *
 * @function
 * @param {Node} node - The node that defines the compute function.
 * @param {Number} count - The total number of invocations to dispatch.
 * @param {Array<Number>} [workgroupSize=[64]] - The size of a single workgroup, one entry per dimension.
 * @returns {ComputeNode}
 */
const compute = ( node, count, workgroupSize ) => {

	return nodeObject( new ComputeNode( nodeObject( node ), count, workgroupSize ) );

};

addMethodChaining( 'compute', compute );
/** @module CacheNode **/
/**
 * This node can be used as a cache management component for another node.
 * Caching is in general used by default in {@link NodeBuilder} but this node
 * allows the usage of a shared parent cache during the build process.
 *
 * @augments Node
 */
class CacheNode extends Node {

	static get type() {

		return 'CacheNode';

	}

	/**
	 * Constructs a new cache node.
	 *
	 * @param {Node} node - The node that should be cached.
	 * @param {Boolean} [parent=true] - Whether this node refers to a shared parent cache or not.
	 */
	constructor( node, parent = true ) {

		super();

		/**
		 * The node that should be cached.
		 *
		 * @type {Node}
		 */
		this.node = node;

		/**
		 * Whether this node refers to a shared parent cache or not.
		 *
		 * @type {Boolean}
		 * @default true
		 */
		this.parent = parent;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isCacheNode = true;

	}

	/**
	 * Runs the given callback with this node's cache installed on the builder
	 * and restores the previous cache afterwards — even if the callback throws,
	 * so the builder state is never left corrupted for subsequent nodes.
	 *
	 * @private
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {Function} callback - The work to perform while the cache is active.
	 * @return {Any} The callback's return value.
	 */
	_withCache( builder, callback ) {

		const previousCache = builder.getCache();
		const cache = builder.getCacheFromNode( this, this.parent );

		builder.setCache( cache );

		try {

			return callback();

		} finally {

			builder.setCache( previousCache );

		}

	}

	/**
	 * The node type is resolved with this node's cache active on the builder.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		return this._withCache( builder, () => this.node.getNodeType( builder ) );

	}

	/**
	 * Builds the wrapped node with this node's cache active on the builder.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {...Any} params - Additional build parameters forwarded to the wrapped node.
	 * @return {Any} The build result.
	 */
	build( builder, ...params ) {

		return this._withCache( builder, () => this.node.build( builder, ...params ) );

	}

}
/**
 * TSL function for creating a cache node.
 *
 * @function
 * @param {Node} node - The node that should be cached.
 * @param {Boolean} parent - Whether this node refers to a shared parent cache or not.
 * @returns {CacheNode}
 */
const cache = ( node, parent ) => {

	return nodeObject( new CacheNode( nodeObject( node ), parent ) );

};

addMethodChaining( 'cache', cache );
/** @module BypassNode **/
/**
 * The class generates the code of a given node but returns another node in the output.
 * This can be used to call a method or node that does not return a value, i.e.
 * type `void` on an input where returning a value is required. Example:
 *
 * ```js
 * material.colorNode = myColor.bypass( runVoidFn() )
 *```
 *
 * @augments Node
 */
class BypassNode extends Node {

	static get type() {

		return 'BypassNode';

	}

	/**
	 * Constructs a new bypass node.
	 *
	 * @param {Node} outputNode - The output node.
	 * @param {Node} callNode - The call node.
	 */
	constructor( outputNode, callNode ) {

		super();

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isBypassNode = true;

		/**
		 * The node whose value appears in the output.
		 *
		 * @type {Node}
		 */
		this.outputNode = outputNode;

		/**
		 * The node that is executed for its side effects only.
		 *
		 * @type {Node}
		 */
		this.callNode = callNode;

	}

	getNodeType( builder ) {

		// the bypass node adopts the type of its output node
		return this.outputNode.getNodeType( builder );

	}

	generate( builder ) {

		// emit the call node as a statement, discarding its result ...
		const callSnippet = this.callNode.build( builder, 'void' );

		if ( callSnippet !== '' ) {

			builder.addLineFlowCode( callSnippet, this );

		}

		// ... and forward the output node's value
		return this.outputNode.build( builder );

	}

}
/**
 * TSL function for creating a bypass node.
 *
 * @function
 * @param {Node} outputNode - The output node.
 * @param {Node} callNode - The call node.
 * @returns {BypassNode}
 */
const bypass = /*@__PURE__*/ nodeProxy( BypassNode );
// chainable form: `outputNode.bypass( callNode )`
addMethodChaining( 'bypass', bypass );
/** @module RemapNode **/
/**
 * This node allows to remap a node value from one range into another. E.g a value of
 * `0.4` in the range `[ 0.3, 0.5 ]` should be remapped into the normalized range `[ 0, 1 ]`.
 * `RemapNode` takes care of that and converts the original value of `0.4` to `0.5`.
 *
 * @augments Node
 */
class RemapNode extends Node {

	static get type() {

		return 'RemapNode';

	}

	/**
	 * Constructs a new remap node.
	 *
	 * @param {Node} node - The node that should be remapped.
	 * @param {Node} inLowNode - The source or current lower bound of the range.
	 * @param {Node} inHighNode - The source or current upper bound of the range.
	 * @param {Node} [outLowNode=float(0)] - The target lower bound of the range.
	 * @param {Node} [outHighNode=float(1)] - The target upper bound of the range.
	 */
	constructor( node, inLowNode, inHighNode, outLowNode = float( 0 ), outHighNode = float( 1 ) ) {

		super();

		/**
		 * The node that should be remapped.
		 *
		 * @type {Node}
		 */
		this.node = node;

		/**
		 * The source or current lower bound of the range.
		 *
		 * @type {Node}
		 */
		this.inLowNode = inLowNode;

		/**
		 * The source or current upper bound of the range.
		 *
		 * @type {Node}
		 */
		this.inHighNode = inHighNode;

		/**
		 * The target lower bound of the range.
		 *
		 * @type {Node}
		 * @default float(0)
		 */
		this.outLowNode = outLowNode;

		/**
		 * The target upper bound of the range.
		 *
		 * @type {Node}
		 * @default float(1)
		 */
		this.outHighNode = outHighNode;

		/**
		 * Whether the node value should be clamped before
		 * remapping it to the target range.
		 *
		 * @type {Boolean}
		 * @default true
		 */
		this.doClamp = true;

	}

	setup() {

		const { node, inLowNode, inHighNode, outLowNode, outHighNode } = this;

		// normalize the input value relative to [ inLow, inHigh ]
		let normalized = node.sub( inLowNode ).div( inHighNode.sub( inLowNode ) );

		if ( this.doClamp === true ) {

			normalized = normalized.clamp();

		}

		// scale and shift into [ outLow, outHigh ]
		return normalized.mul( outHighNode.sub( outLowNode ) ).add( outLowNode );

	}

}
/**
 * TSL function for creating a remap node.
 *
 * @function
 * @param {Node} node - The node that should be remapped.
 * @param {Node} inLowNode - The source or current lower bound of the range.
 * @param {Node} inHighNode - The source or current upper bound of the range.
 * @param {Node} [outLowNode=float(0)] - The target lower bound of the range.
 * @param {Node} [outHighNode=float(1)] - The target upper bound of the range.
 * @returns {RemapNode}
 */
const remap = /*@__PURE__*/ nodeProxy( RemapNode, null, null, { doClamp: false } );
/**
 * TSL function for creating a remap node, but with enabled clamping.
 *
 * @function
 * @param {Node} node - The node that should be remapped.
 * @param {Node} inLowNode - The source or current lower bound of the range.
 * @param {Node} inHighNode - The source or current upper bound of the range.
 * @param {Node} [outLowNode=float(0)] - The target lower bound of the range.
 * @param {Node} [outHighNode=float(1)] - The target upper bound of the range.
 * @returns {RemapNode}
 */
const remapClamp = /*@__PURE__*/ nodeProxy( RemapNode );
// `remap()` overrides `doClamp` to false; `remapClamp()` keeps the class default (clamping on)
addMethodChaining( 'remap', remap );
addMethodChaining( 'remapClamp', remapClamp );
/** @module ExpressionNode **/
/**
 * This class can be used to implement basic expressions in shader code.
 * Basic examples for that are `return`, `continue` or `discard` statements.
 *
 * @augments Node
 */
class ExpressionNode extends Node {

	static get type() {

		return 'ExpressionNode';

	}

	/**
	 * Constructs a new expression node.
	 *
	 * @param {String} [snippet=''] - The native code snippet.
	 * @param {String} [nodeType='void'] - The node type.
	 */
	constructor( snippet = '', nodeType = 'void' ) {

		super( nodeType );

		/**
		 * The native code snippet.
		 *
		 * @type {String}
		 * @default ''
		 */
		this.snippet = snippet;

	}

	generate( builder, output ) {

		const { snippet } = this;
		const type = this.getNodeType( builder );

		if ( type === 'void' ) {

			// statements are added to the line flow and produce no value
			builder.addLineFlowCode( snippet, this );

		} else {

			// expressions are parenthesized and converted to the requested output type
			return builder.format( `( ${ snippet } )`, type, output );

		}

	}

}
/**
 * TSL function for creating an expression node.
 *
 * @function
 * @param {String} [snippet=''] - The native code snippet.
 * @param {String} [nodeType='void'] - The node type.
 * @returns {ExpressionNode}
 */
const expression = /*@__PURE__*/ nodeProxy( ExpressionNode );
/** @module Discard **/
/**
 * Represents a `discard` shader operation in TSL.
 *
 * @method
 * @param {ConditionalNode?} conditional - An optional conditional node. It allows to decide whether the discard should be executed or not.
 * @return {Node} The `discard` expression.
 */
const Discard = ( conditional ) => {

	let discardNode = expression( 'discard' );

	if ( conditional ) {

		// only discard when the condition evaluates to true
		discardNode = select( conditional, discardNode );

	}

	return discardNode.append();

};

/**
 * Represents a `return` shader operation in TSL.
 *
 * @method
 * @return {ExpressionNode} The `return` expression.
 */
const Return = () => {

	return expression( 'return' ).append();

};

addMethodChaining( 'discard', Discard );
/** @module RenderOutputNode **/
/**
 * Normally, tone mapping and color conversion happen automatically
 * before pixels are written to the default (screen) framebuffer. In certain
 * post processing setups this happens too late because certain effects
 * require e.g. sRGB input. For such scenarios, `RenderOutputNode` can be used
 * to apply tone mapping and color space conversion at an arbitrary point
 * in the effect chain.
 *
 * When applying tone mapping and color space conversion manually with this node,
 * you have to set {@link PostProcessing#outputColorTransform} to `false`.
 *
 * ```js
 * const postProcessing = new PostProcessing( renderer );
 * postProcessing.outputColorTransform = false;
 *
 * const scenePass = pass( scene, camera );
 * const outputPass = renderOutput( scenePass );
 *
 * postProcessing.outputNode = outputPass;
 * ```
 *
 * @augments TempNode
 */
class RenderOutputNode extends TempNode {

	static get type() {

		return 'RenderOutputNode';

	}

	/**
	 * Constructs a new render output node.
	 *
	 * @param {Node} colorNode - The color node to process.
	 * @param {Number} toneMapping - The tone mapping type.
	 * @param {String} outputColorSpace - The output color space.
	 */
	constructor( colorNode, toneMapping, outputColorSpace ) {

		super( 'vec4' );

		/**
		 * The color node to process.
		 *
		 * @type {Node}
		 */
		this.colorNode = colorNode;

		/**
		 * The tone mapping type.
		 *
		 * @type {Number?}
		 */
		this.toneMapping = toneMapping;

		/**
		 * The output color space.
		 *
		 * @type {String?}
		 */
		this.outputColorSpace = outputColorSpace;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isRenderOutputNode = true;

	}

	setup( { context } ) {

		let resultNode = this.colorNode || context.color;

		// explicit settings take precedence, otherwise fall back to the context
		const toneMapping = ( this.toneMapping !== null ? this.toneMapping : context.toneMapping ) || NoToneMapping;
		const outputColorSpace = ( this.outputColorSpace !== null ? this.outputColorSpace : context.outputColorSpace ) || NoColorSpace;

		// tone mapping

		if ( toneMapping !== NoToneMapping ) {

			resultNode = resultNode.toneMapping( toneMapping );

		}

		// working to output color space conversion

		if ( outputColorSpace !== NoColorSpace && outputColorSpace !== ColorManagement.workingColorSpace ) {

			resultNode = resultNode.workingToColorSpace( outputColorSpace );

		}

		return resultNode;

	}

}
/**
 * TSL function for creating a render output node.
 *
 * @function
 * @param {Node} color - The color node to process.
 * @param {Number?} [toneMapping=null] - The tone mapping type.
 * @param {String?} [outputColorSpace=null] - The output color space.
 * @returns {RenderOutputNode}
 */
const renderOutput = ( color, toneMapping = null, outputColorSpace = null ) => {

	return nodeObject( new RenderOutputNode( nodeObject( color ), toneMapping, outputColorSpace ) );

};

addMethodChaining( 'renderOutput', renderOutput );
// Non-PURE exports list, side-effects are required here.
// TSL Base Syntax
/**
 * Deprecated stub kept for backwards compatibility. Logs a warning when the
 * removed `addNodeElement()` API is used.
 *
 * @param {String} name - The name of the node element that was attempted to be added.
 */
function addNodeElement( name/*, nodeElement*/ ) {
console.warn( 'THREE.TSLBase: AddNodeElement has been removed in favor of tree-shaking. Trying add', name );
}
/** @module AttributeNode **/
/**
 * Base class for representing shader attributes as nodes.
 *
 * @augments Node
 */
class AttributeNode extends Node {
static get type() {
return 'AttributeNode';
}
/**
 * Constructs a new attribute node.
 *
 * @param {String} attributeName - The name of the attribute.
 * @param {String?} nodeType - The node type.
 */
constructor( attributeName, nodeType = null ) {
super( nodeType );
/**
 * `AttributeNode` sets this property to `true` by default.
 *
 * @type {Boolean}
 * @default true
 */
this.global = true;
// private state, use setAttributeName()/getAttributeName() for access
this._attributeName = attributeName;
}
/**
 * The hash is the attribute name, so equally named attribute nodes
 * can be shared.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The hash.
 */
getHash( builder ) {
return this.getAttributeName( builder );
}
/**
 * When no explicit node type was given, the type is inferred from the
 * geometry attribute; it falls back to `'float'` when the attribute is
 * missing on the geometry.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The node type.
 */
getNodeType( builder ) {
let nodeType = this.nodeType;
if ( nodeType === null ) {
const attributeName = this.getAttributeName( builder );
if ( builder.hasGeometryAttribute( attributeName ) ) {
const attribute = builder.geometry.getAttribute( attributeName );
nodeType = builder.getTypeFromAttribute( attribute );
} else {
nodeType = 'float';
}
}
return nodeType;
}
/**
 * Sets the attribute name to the given value. The method can be
 * overwritten in derived classes if the final name must be computed
 * analytically.
 *
 * @param {String} attributeName - The name of the attribute.
 * @return {AttributeNode} A reference to this node.
 */
setAttributeName( attributeName ) {
this._attributeName = attributeName;
return this;
}
/**
 * Returns the attribute name of this node. The method can be
 * overwritten in derived classes if the final name must be computed
 * analytically.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The attribute name.
 */
getAttributeName( /*builder*/ ) {
return this._attributeName;
}
/**
 * Generates the attribute access. In the vertex stage the attribute is
 * read directly; in other stages the value is routed through a varying.
 * A missing geometry attribute produces a warning and a default constant.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The generated code snippet.
 */
generate( builder ) {
const attributeName = this.getAttributeName( builder );
const nodeType = this.getNodeType( builder );
const geometryAttribute = builder.hasGeometryAttribute( attributeName );
if ( geometryAttribute === true ) {
const attribute = builder.geometry.getAttribute( attributeName );
const attributeType = builder.getTypeFromAttribute( attribute );
const nodeAttribute = builder.getAttribute( attributeName, attributeType );
if ( builder.shaderStage === 'vertex' ) {
return builder.format( nodeAttribute.name, attributeType, nodeType );
} else {
const nodeVarying = varying( this );
return nodeVarying.build( builder, nodeType );
}
} else {
console.warn( `AttributeNode: Vertex attribute "${ attributeName }" not found on geometry.` );
return builder.generateConst( nodeType );
}
}
/**
 * Serializes this node's state into the given data object.
 *
 * @param {Object} data - The target data object.
 */
serialize( data ) {
super.serialize( data );
data.global = this.global;
data._attributeName = this._attributeName;
}
/**
 * Restores this node's state from the given data object.
 *
 * @param {Object} data - The source data object.
 */
deserialize( data ) {
super.deserialize( data );
this.global = data.global;
this._attributeName = data._attributeName;
}
}
/**
* TSL function for creating an attribute node.
*
* @function
* @param {String} name - The name of the attribute.
* @param {String?} [nodeType=null] - The node type. If not provided, it is inferred from the geometry attribute.
* @returns {AttributeNode}
*/
const attribute = ( name, nodeType ) => nodeObject( new AttributeNode( name, nodeType ) );
/** @module UV **/
/**
* TSL function for creating an uv attribute node with the given index.
* Index `0` maps to the attribute `'uv'`, higher indices to `'uv1'`, `'uv2'`, etc.
*
* @function
* @param {Number} [index=0] - The uv index.
* @return {AttributeNode<vec2>} The uv attribute node.
*/
const uv = ( index = 0 ) => attribute( index > 0 ? `uv${ index }` : 'uv', 'vec2' );
/** @module TextureSizeNode **/
/**
* A node that represents the dimensions of a texture. The texture size is
* retrieved in the shader via built-in shader functions like `textureDimensions()`
* or `textureSize()`.
*
* @augments Node
*/
class TextureSizeNode extends Node {
static get type() {
return 'TextureSizeNode';
}
/**
* Constructs a new texture size node.
*
* @param {TextureNode} textureNode - A texture node which size should be retrieved.
* @param {Node<int>?} [levelNode=null] - A level node which defines the requested mip.
*/
constructor( textureNode, levelNode = null ) {
super( 'uvec2' );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isTextureSizeNode = true;
/**
* A texture node which size should be retrieved.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* A level node which defines the requested mip.
*
* @type {Node<int>}
* @default null
*/
this.levelNode = levelNode;
}
/**
* Generates the code snippet that queries the texture dimensions via the
* builder's `textureDimensions` method. Mip level `0` is used when no
* level node is defined.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {String} output - The current output.
* @return {String} The generated code snippet.
*/
generate( builder, output ) {
const textureProperty = this.textureNode.build( builder, 'property' );
const level = this.levelNode === null ? '0' : this.levelNode.build( builder, 'int' );
return builder.format( `${ builder.getMethod( 'textureDimensions' ) }( ${ textureProperty }, ${ level } )`, this.getNodeType( builder ), output );
}
}
/**
* TSL function for creating a texture size node. The resulting node type
* is `uvec2`.
*
* @function
* @param {TextureNode} textureNode - A texture node which size should be retrieved.
* @param {Node<int>?} [levelNode=null] - A level node which defines the requested mip.
* @returns {TextureSizeNode}
*/
const textureSize = /*@__PURE__*/ nodeProxy( TextureSizeNode );
/** @module MaxMipLevelNode **/
/**
* A special type of uniform node that computes the
* maximum mipmap level for a given texture node.
*
* ```js
* const level = maxMipLevel( textureNode );
* ```
*
* @augments module:UniformNode~UniformNode
*/
class MaxMipLevelNode extends UniformNode {
static get type() {
return 'MaxMipLevelNode';
}
/**
* Constructs a new max mip level node.
*
* @param {TextureNode} textureNode - The texture node to compute the max mip level for.
*/
constructor( textureNode ) {
super( 0 );
/**
* The texture node to compute the max mip level for.
*
* @private
* @type {TextureNode}
*/
this._textureNode = textureNode;
/**
* The `updateType` is set to `NodeUpdateType.FRAME` since the node updates
* the texture once per frame in its {@link MaxMipLevelNode#update} method.
*
* @type {String}
* @default 'frame'
*/
this.updateType = NodeUpdateType.FRAME;
}
/**
* The texture node to compute the max mip level for.
*
* @readonly
* @type {TextureNode}
*/
get textureNode() {
return this._textureNode;
}
/**
* The texture.
*
* @readonly
* @type {Texture}
*/
get texture() {
return this._textureNode.value;
}
/**
* Updates the uniform value once per frame. For multi-image textures the
* first entry of `images` is resolved (unwrapping a nested `.image` if
* present), otherwise `texture.image` is used. The value is computed as
* `log2( max( width, height ) )` and is left unchanged when no image with
* a `width` is available yet.
*/
update() {
const texture = this.texture;
const images = texture.images;
const image = ( images && images.length > 0 ) ? ( ( images[ 0 ] && images[ 0 ].image ) || images[ 0 ] ) : texture.image;
if ( image && image.width !== undefined ) {
const { width, height } = image;
this.value = Math.log2( Math.max( width, height ) );
}
}
}
/**
* TSL function for creating a max mip level node. The node's value is
* recomputed once per frame.
*
* @function
* @param {TextureNode} textureNode - The texture node to compute the max mip level for.
* @returns {MaxMipLevelNode}
*/
const maxMipLevel = /*@__PURE__*/ nodeProxy( MaxMipLevelNode );
/** @module TextureNode **/
/**
* This type of uniform node represents a 2D texture.
*
* @augments module:UniformNode~UniformNode
*/
class TextureNode extends UniformNode {
static get type() {
return 'TextureNode';
}
/**
* Constructs a new texture node.
*
* @param {Texture} value - The texture.
* @param {Node<vec2|vec3>?} [uvNode=null] - The uv node.
* @param {Node<int>?} [levelNode=null] - The level node.
* @param {Node<float>?} [biasNode=null] - The bias node.
*/
constructor( value, uvNode = null, levelNode = null, biasNode = null ) {
super( value );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isTextureNode = true;
/**
* Represents the texture coordinates.
*
* @type {Node<vec2|vec3>?}
* @default null
*/
this.uvNode = uvNode;
/**
* Represents the mip level that should be selected.
*
* @type {Node<int>?}
* @default null
*/
this.levelNode = levelNode;
/**
* Represents the bias to be applied during level-of-detail computation.
*
* @type {Node<float>?}
* @default null
*/
this.biasNode = biasNode;
/**
* Represents a reference value a texture sample is compared to.
*
* @type {Node<float>?}
* @default null
*/
this.compareNode = null;
/**
* When using texture arrays, the depth node defines the layer to select.
*
* @type {Node<int>?}
* @default null
*/
this.depthNode = null;
/**
* When defined, a texture is sampled using explicit gradients.
*
* @type {Array<Node<vec2>>?}
* @default null
*/
this.gradNode = null;
/**
* Whether texture values should be sampled or fetched.
*
* @type {Boolean}
* @default true
*/
this.sampler = true;
/**
* Whether the uv transformation matrix should be
* automatically updated or not. Use `setUpdateMatrix()`
* if you want to change the value of the property.
*
* @type {Boolean}
* @default false
*/
this.updateMatrix = false;
/**
* By default the `update()` method is not executed. `setUpdateMatrix()`
* sets the value to `frame` when the uv transformation matrix should
* automatically be updated.
*
* @type {String}
* @default 'none'
*/
this.updateType = NodeUpdateType.NONE;
/**
* The reference node.
*
* @type {Node?}
* @default null
*/
this.referenceNode = null;
/**
* The texture value is stored in a private property.
*
* @private
* @type {Texture}
*/
this._value = value;
/**
* The uniform node that represents the uv transformation matrix.
*
* @private
* @type {UniformNode<mat3>?}
*/
this._matrixUniform = null;
// when no explicit uvs are given, the default uvs are transformed
// by the texture matrix which then must be kept up to date
this.setUpdateMatrix( uvNode === null );
}
/**
* Sets the texture value. If a reference node is defined, the value is
* forwarded to the reference; otherwise the private `_value` is updated.
*
* @param {Texture} value - The texture to set.
*/
set value( value ) {
if ( this.referenceNode ) {
this.referenceNode.value = value;
} else {
this._value = value;
}
}
/**
* The texture value.
*
* @type {Texture}
*/
get value() {
return this.referenceNode ? this.referenceNode.value : this._value;
}
/**
* Overwritten since the uniform hash is defined by the texture's UUID.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The uniform hash.
*/
getUniformHash( /*builder*/ ) {
return this.value.uuid;
}
/**
* Overwritten since the node type is inferred from the texture type.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The node type.
*/
getNodeType( /*builder*/ ) {
if ( this.value.isDepthTexture === true ) return 'float';
if ( this.value.type === UnsignedIntType ) {
return 'uvec4';
} else if ( this.value.type === IntType ) {
return 'ivec4';
}
return 'vec4';
}
/**
* Overwrites the default implementation to return a fixed value `'texture'`.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The input type.
*/
getInputType( /*builder*/ ) {
return 'texture';
}
/**
* Returns a default uvs based on the current texture's channel.
*
* @return {AttributeNode<vec2>} The default uvs.
*/
getDefaultUV() {
return uv( this.value.channel );
}
/**
* Overwritten to always return the texture reference of the node.
*
* @param {Any} state - This method can be invocated in different contexts so `state` can refer to any object type.
* @return {Texture} The texture reference.
*/
updateReference( /*state*/ ) {
return this.value;
}
/**
* Transforms the given uv node with the texture transformation matrix.
*
* @param {Node} uvNode - The uv node to transform.
* @return {Node} The transformed uv node.
*/
getTransformedUV( uvNode ) {
// the matrix uniform is created lazily on first use
if ( this._matrixUniform === null ) this._matrixUniform = uniform( this.value.matrix );
return this._matrixUniform.mul( vec3( uvNode, 1 ) ).xy;
}
/**
* Defines whether the uv transformation matrix should automatically be updated or not.
*
* @param {Boolean} value - The update toggle.
* @return {TextureNode} A reference to this node.
*/
setUpdateMatrix( value ) {
this.updateMatrix = value;
this.updateType = value ? NodeUpdateType.RENDER : NodeUpdateType.NONE;
return this;
}
/**
* Setups the uv node. Depending on the backend as well as texture's image and type, it might be necessary
* to modify the uv node for correct sampling.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {Node} uvNode - The uv node to setup.
* @return {Node} The updated uv node.
*/
setupUV( builder, uvNode ) {
const texture = this.value;
if ( builder.isFlipY() && ( ( texture.image instanceof ImageBitmap && texture.flipY === true ) || texture.isRenderTargetTexture === true || texture.isFramebufferTexture === true || texture.isDepthTexture === true ) ) {
if ( this.sampler ) {
uvNode = uvNode.flipY();
} else {
// fetch access works on integer texel coordinates, so the y
// coordinate is mirrored against the texture height instead
uvNode = uvNode.setY( int( textureSize( this, this.levelNode ).y ).sub( uvNode.y ).sub( 1 ) );
}
}
return uvNode;
}
/**
* Setups texture node by preparing the internal nodes for code generation.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setup( builder ) {
const properties = builder.getNodeProperties( this );
properties.referenceNode = this.referenceNode;
//
const texture = this.value;
if ( ! texture || texture.isTexture !== true ) {
throw new Error( 'THREE.TSL: `texture( value )` function expects a valid instance of THREE.Texture().' );
}
//
let uvNode = this.uvNode;
if ( ( uvNode === null || builder.context.forceUVContext === true ) && builder.context.getUV ) {
uvNode = builder.context.getUV( this );
}
if ( ! uvNode ) uvNode = this.getDefaultUV();
if ( this.updateMatrix === true ) {
uvNode = this.getTransformedUV( uvNode );
}
uvNode = this.setupUV( builder, uvNode );
//
let levelNode = this.levelNode;
if ( levelNode === null && builder.context.getTextureLevel ) {
levelNode = builder.context.getTextureLevel( this );
}
//
properties.uvNode = uvNode;
properties.levelNode = levelNode;
properties.biasNode = this.biasNode;
properties.compareNode = this.compareNode;
properties.gradNode = this.gradNode;
properties.depthNode = this.depthNode;
}
/**
* Generates the uv code snippet.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {Node} uvNode - The uv node to generate code for.
* @return {String} The generated code snippet.
*/
generateUV( builder, uvNode ) {
return uvNode.build( builder, this.sampler === true ? 'vec2' : 'ivec2' );
}
/**
* Generates the snippet for the texture sampling.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {String} textureProperty - The texture property.
* @param {String} uvSnippet - The uv snippet.
* @param {String?} levelSnippet - The level snippet.
* @param {String?} biasSnippet - The bias snippet.
* @param {String?} depthSnippet - The depth snippet.
* @param {String?} compareSnippet - The compare snippet.
* @param {Array<String>?} gradSnippet - The grad snippet.
* @return {String} The generated code snippet.
*/
generateSnippet( builder, textureProperty, uvSnippet, levelSnippet, biasSnippet, depthSnippet, compareSnippet, gradSnippet ) {
const texture = this.value;
let snippet;
if ( levelSnippet ) {
snippet = builder.generateTextureLevel( texture, textureProperty, uvSnippet, levelSnippet, depthSnippet );
} else if ( biasSnippet ) {
snippet = builder.generateTextureBias( texture, textureProperty, uvSnippet, biasSnippet, depthSnippet );
} else if ( gradSnippet ) {
snippet = builder.generateTextureGrad( texture, textureProperty, uvSnippet, gradSnippet, depthSnippet );
} else if ( compareSnippet ) {
snippet = builder.generateTextureCompare( texture, textureProperty, uvSnippet, compareSnippet, depthSnippet );
} else if ( this.sampler === false ) {
snippet = builder.generateTextureLoad( texture, textureProperty, uvSnippet, depthSnippet );
} else {
snippet = builder.generateTexture( texture, textureProperty, uvSnippet, depthSnippet );
}
return snippet;
}
/**
* Generates the code snippet of the texture node.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {String} output - The current output.
* @return {String} The generated code snippet.
*/
generate( builder, output ) {
const texture = this.value;
const properties = builder.getNodeProperties( this );
const textureProperty = super.generate( builder, 'property' );
if ( output === 'sampler' ) {
return textureProperty + '_sampler';
} else if ( builder.isReference( output ) ) {
return textureProperty;
} else {
const nodeData = builder.getDataFromNode( this );
// the generated sample is cached in a property so repeated reads of
// this node reuse the same variable instead of sampling again
let propertyName = nodeData.propertyName;
if ( propertyName === undefined ) {
const { uvNode, levelNode, biasNode, compareNode, depthNode, gradNode } = properties;
const uvSnippet = this.generateUV( builder, uvNode );
const levelSnippet = levelNode ? levelNode.build( builder, 'float' ) : null;
const biasSnippet = biasNode ? biasNode.build( builder, 'float' ) : null;
const depthSnippet = depthNode ? depthNode.build( builder, 'int' ) : null;
const compareSnippet = compareNode ? compareNode.build( builder, 'float' ) : null;
const gradSnippet = gradNode ? [ gradNode[ 0 ].build( builder, 'vec2' ), gradNode[ 1 ].build( builder, 'vec2' ) ] : null;
const nodeVar = builder.getVarFromNode( this );
propertyName = builder.getPropertyName( nodeVar );
const snippet = this.generateSnippet( builder, textureProperty, uvSnippet, levelSnippet, biasSnippet, depthSnippet, compareSnippet, gradSnippet );
builder.addLineFlowCode( `${propertyName} = ${snippet}`, this );
nodeData.snippet = snippet;
nodeData.propertyName = propertyName;
}
let snippet = propertyName;
const nodeType = this.getNodeType( builder );
if ( builder.needsToWorkingColorSpace( texture ) ) {
snippet = colorSpaceToWorking( expression( snippet, nodeType ), texture.colorSpace ).setup( builder ).build( builder, nodeType );
}
return builder.format( snippet, nodeType, output );
}
}
/**
* Sets the sampler value.
*
* @param {Boolean} value - The sampler value to set.
* @return {TextureNode} A reference to this texture node.
*/
setSampler( value ) {
this.sampler = value;
return this;
}
/**
* Returns the sampler value.
*
* @return {Boolean} The sampler value.
*/
getSampler() {
return this.sampler;
}
// @TODO: Move to TSL
/**
* @function
* @deprecated since r172. Use {@link TextureNode#sample} instead.
*
* @param {Node} uvNode - The uv node.
* @return {TextureNode} A texture node representing the texture sample.
*/
uv( uvNode ) { // @deprecated, r172
console.warn( 'THREE.TextureNode: .uv() has been renamed. Use .sample() instead.' );
return this.sample( uvNode );
}
/**
* Samples the texture with the given uv node.
*
* @param {Node} uvNode - The uv node.
* @return {TextureNode} A texture node representing the texture sample.
*/
sample( uvNode ) {
const textureNode = this.clone();
textureNode.uvNode = nodeObject( uvNode );
textureNode.referenceNode = this.getSelf();
return nodeObject( textureNode );
}
/**
* Samples a blurred version of the texture by defining an internal bias.
*
* @param {Node<float>} amountNode - How blurred the texture should be.
* @return {TextureNode} A texture node representing the texture sample.
*/
blur( amountNode ) {
const textureNode = this.clone();
textureNode.biasNode = nodeObject( amountNode ).mul( maxMipLevel( textureNode ) );
textureNode.referenceNode = this.getSelf();
return nodeObject( textureNode );
}
/**
* Samples a specific mip of the texture.
*
* @param {Node<int>} levelNode - The mip level to sample.
* @return {TextureNode} A texture node representing the texture sample.
*/
level( levelNode ) {
const textureNode = this.clone();
textureNode.levelNode = nodeObject( levelNode );
textureNode.referenceNode = this.getSelf();
return nodeObject( textureNode );
}
/**
* Returns the texture size of the requested level.
*
* @param {Node<int>} levelNode - The level to compute the size for.
* @return {TextureSizeNode} The texture size.
*/
size( levelNode ) {
return textureSize( this, levelNode );
}
/**
* Samples the texture with the given bias.
*
* @param {Node<float>} biasNode - The bias node.
* @return {TextureNode} A texture node representing the texture sample.
*/
bias( biasNode ) {
const textureNode = this.clone();
textureNode.biasNode = nodeObject( biasNode );
textureNode.referenceNode = this.getSelf();
return nodeObject( textureNode );
}
/**
* Samples the texture by executing a compare operation.
*
* @param {Node<float>} compareNode - The node that defines the compare value.
* @return {TextureNode} A texture node representing the texture sample.
*/
compare( compareNode ) {
const textureNode = this.clone();
textureNode.compareNode = nodeObject( compareNode );
textureNode.referenceNode = this.getSelf();
return nodeObject( textureNode );
}
/**
* Samples the texture using an explicit gradient.
*
* @param {Node<vec2>} gradNodeX - The gradX node.
* @param {Node<vec2>} gradNodeY - The gradY node.
* @return {TextureNode} A texture node representing the texture sample.
*/
grad( gradNodeX, gradNodeY ) {
const textureNode = this.clone();
textureNode.gradNode = [ nodeObject( gradNodeX ), nodeObject( gradNodeY ) ];
textureNode.referenceNode = this.getSelf();
return nodeObject( textureNode );
}
/**
* Samples the texture by defining a depth node.
*
* @param {Node<int>} depthNode - The depth node.
* @return {TextureNode} A texture node representing the texture sample.
*/
depth( depthNode ) {
const textureNode = this.clone();
textureNode.depthNode = nodeObject( depthNode );
textureNode.referenceNode = this.getSelf();
return nodeObject( textureNode );
}
// --
/**
* Serializes the node to JSON. The texture itself is serialized by UUID
* along with the sampler flag and the matrix update settings.
*
* @param {Object} data - The JSON object.
*/
serialize( data ) {
super.serialize( data );
data.value = this.value.toJSON( data.meta ).uuid;
data.sampler = this.sampler;
data.updateMatrix = this.updateMatrix;
data.updateType = this.updateType;
}
/**
* Deserializes the node from the given JSON, resolving the texture
* from `data.meta.textures` by UUID.
*
* @param {Object} data - The JSON object.
*/
deserialize( data ) {
super.deserialize( data );
this.value = data.meta.textures[ data.value ];
this.sampler = data.sampler;
this.updateMatrix = data.updateMatrix;
this.updateType = data.updateType;
}
/**
* The update is used to implement the update of the uv transformation matrix.
*/
update() {
const texture = this.value;
const matrixUniform = this._matrixUniform;
if ( matrixUniform !== null ) matrixUniform.value = texture.matrix;
if ( texture.matrixAutoUpdate === true ) {
texture.updateMatrix();
}
}
/**
* Clones the texture node.
*
* @return {TextureNode} The cloned texture node.
*/
clone() {
const newNode = new this.constructor( this.value, this.uvNode, this.levelNode, this.biasNode );
newNode.sampler = this.sampler;
return newNode;
}
}
/**
* TSL function for creating a texture node. When no uv node is provided,
* the node automatically updates the uv transformation matrix of the texture.
*
* @function
* @param {Texture} value - The texture.
* @param {Node<vec2|vec3>?} [uvNode=null] - The uv node.
* @param {Node<int>?} [levelNode=null] - The level node.
* @param {Node<float>?} [biasNode=null] - The bias node.
* @returns {TextureNode}
*/
const texture = /*@__PURE__*/ nodeProxy( TextureNode );
/**
* TSL function for creating a texture node that fetches/loads texels without interpolation.
*
* @function
* @param {Texture} value - The texture.
* @param {Node<vec2|vec3>?} [uvNode=null] - The uv node.
* @param {Node<int>?} [levelNode=null] - The level node.
* @param {Node<float>?} [biasNode=null] - The bias node.
* @returns {TextureNode}
*/
const textureLoad = ( ...params ) => {
const textureNode = texture( ...params );
return textureNode.setSampler( false );
};
//export const textureLevel = ( value, uv, level ) => texture( value, uv ).level( level );
/**
* Converts a texture or texture node to a sampler.
*
* @function
* @param {TextureNode|Texture} aTexture - The texture or texture node to convert.
* @returns {Node}
*/
const sampler = ( aTexture ) => {
const textureNode = aTexture.isNode === true ? aTexture : texture( aTexture );
return textureNode.convert( 'sampler' );
};
/** @module BufferNode **/
/**
* A special type of uniform node which represents array-like data
* as uniform buffers. The access usually happens via `element()`
* which returns an instance of {@link ArrayElementNode}. For example:
*
* ```js
* const bufferNode = buffer( array, 'mat4', count );
* const matrixNode = bufferNode.element( index ); // access a matrix from the buffer
* ```
* In general, it is recommended to use the more managed {@link UniformArrayNode}
* since it handles more input types and automatically cares about buffer paddings.
*
* @augments module:UniformNode~UniformNode
*/
class BufferNode extends UniformNode {
static get type() {
return 'BufferNode';
}
/**
* Constructs a new buffer node.
*
* @param {Array<Number>} value - Array-like buffer data.
* @param {String} bufferType - The data type of the buffer.
* @param {Number} [bufferCount=0] - The count of buffer elements.
*/
constructor( value, bufferType, bufferCount = 0 ) {
super( value, bufferType );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isBufferNode = true;
/**
* The data type of the buffer.
*
* @type {String}
*/
this.bufferType = bufferType;
/**
* The count of buffer elements.
*
* @type {Number}
* @default 0
*/
this.bufferCount = bufferCount;
}
/**
* The data type of the buffer elements.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The element type.
*/
getElementType( builder ) {
return this.getNodeType( builder );
}
/**
* Overwrites the default implementation to return a fixed value `'buffer'`.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The input type.
*/
getInputType( /*builder*/ ) {
return 'buffer';
}
}
/**
* TSL function for creating a buffer node. For a more managed alternative
* that handles three.js primitives and buffer padding, see {@link uniformArray}.
*
* @function
* @param {Array} value - Array-like buffer data.
* @param {String} type - The data type of a buffer element.
* @param {Number} count - The count of buffer elements.
* @returns {BufferNode}
*/
const buffer = ( value, type, count ) => nodeObject( new BufferNode( value, type, count ) );
/** @module UniformArrayNode **/
/**
* Represents the element access on uniform array nodes.
*
* @augments ArrayElementNode
*/
class UniformArrayElementNode extends ArrayElementNode {
static get type() {
return 'UniformArrayElementNode';
}
/**
* Constructs a new uniform array element node.
*
* @param {UniformArrayNode} uniformArrayNode - The uniform array node to access.
* @param {IndexNode} indexNode - The index data that define the position of the accessed element in the array.
*/
constructor( uniformArrayNode, indexNode ) {
super( uniformArrayNode, indexNode );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isArrayBufferElementNode = true;
}
/**
* Generates the code snippet for the element access and converts the
* padded type of the parent array node back to the element's node type.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The generated code snippet.
*/
generate( builder ) {
const snippet = super.generate( builder );
const type = this.getNodeType();
const paddedType = this.node.getPaddedType();
return builder.format( snippet, paddedType, type );
}
}
/**
* Similar to {@link module:BufferNode~BufferNode} this module represents array-like data as
* uniform buffers. Unlike {@link module:BufferNode~BufferNode}, it can handle more common
* data types in the array (e.g `three.js` primitives) and automatically
* manage buffer padding. It should be the first choice when working with
* uniforms buffers.
* ```js
* const tintColors = uniformArray( [
* new Color( 1, 0, 0 ),
* new Color( 0, 1, 0 ),
* new Color( 0, 0, 1 )
* ], 'color' );
*
* const redColor = tintColors.element( 0 );
* ```
*
* @augments module:BufferNode~BufferNode
*/
class UniformArrayNode extends BufferNode {
static get type() {
return 'UniformArrayNode';
}
/**
* Constructs a new uniform array node.
*
* @param {Array<Any>} value - Array holding the buffer data.
* @param {String?} [elementType=null] - The data type of a buffer element.
*/
constructor( value, elementType = null ) {
super( null );
/**
* Array holding the buffer data. Unlike {@link module:BufferNode~BufferNode}, the array can
* hold number primitives as well as three.js objects like vectors, matrices
* or colors.
*
* @type {Array<Any>}
*/
this.array = value;
/**
* The data type of an array element. If not given, it is inferred from
* the first array element.
*
* @type {String}
*/
this.elementType = elementType === null ? getValueType( value[ 0 ] ) : elementType;
/**
* The padded type. Uniform buffers must conform to a certain buffer layout
* so a separate type is computed to ensure correct buffer size.
*
* @type {String}
*/
this.paddedType = this.getPaddedType();
/**
* Overwritten since uniform array nodes are updated per render.
*
* @type {String}
* @default 'render'
*/
this.updateType = NodeUpdateType.RENDER;
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isArrayBufferNode = true;
}
/**
* This method is overwritten since the node type is inferred from the
* {@link module:UniformArrayNode~UniformArrayNode#paddedType}.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The node type.
*/
getNodeType( /*builder*/ ) {
return this.paddedType;
}
/**
* The data type of the array elements.
*
* @return {String} The element type.
*/
getElementType() {
return this.elementType;
}
/**
* Returns the padded type based on the element type. Scalars and vectors
* are padded to a four-component vector of matching signedness, matrices
* (except `mat2`, which already fills four slots) to `mat4`.
*
* @return {String} The padded type.
*/
getPaddedType() {
const elementType = this.elementType;
let paddedType = 'vec4';
if ( elementType === 'mat2' ) {
paddedType = 'mat2';
} else if ( /mat/.test( elementType ) === true ) {
paddedType = 'mat4';
} else if ( elementType.charAt( 0 ) === 'i' ) {
paddedType = 'ivec4';
} else if ( elementType.charAt( 0 ) === 'u' ) {
paddedType = 'uvec4';
}
return paddedType;
}
/**
* The update makes sure to correctly transfer the data from the (complex) objects
* in the array to the internal, correctly padded value buffer.
*
* @param {NodeFrame} frame - A reference to the current node frame.
*/
update( /*frame*/ ) {
const { array, value } = this;
const elementType = this.elementType;
if ( elementType === 'float' || elementType === 'int' || elementType === 'uint' ) {
for ( let i = 0; i < array.length; i ++ ) {
const index = i * 4;
value[ index ] = array[ i ];
}
} else if ( elementType === 'color' ) {
for ( let i = 0; i < array.length; i ++ ) {
const index = i * 4;
const vector = array[ i ];
value[ index ] = vector.r;
value[ index + 1 ] = vector.g;
value[ index + 2 ] = vector.b || 0;
//value[ index + 3 ] = vector.a || 0;
}
} else if ( elementType === 'mat2' ) {
for ( let i = 0; i < array.length; i ++ ) {
const index = i * 4;
const matrix = array[ i ];
value[ index ] = matrix.elements[ 0 ];
value[ index + 1 ] = matrix.elements[ 1 ];
value[ index + 2 ] = matrix.elements[ 2 ];
value[ index + 3 ] = matrix.elements[ 3 ];
}
} else if ( elementType === 'mat3' ) {
for ( let i = 0; i < array.length; i ++ ) {
const index = i * 16;
const matrix = array[ i ];
// mat3 is stored in mat4 layout; the skipped pad slots (3, 7, 11
// and 12-14) keep the initial zero from the typed array created in
// setup(), only the last diagonal slot is set to 1.
value[ index ] = matrix.elements[ 0 ];
value[ index + 1 ] = matrix.elements[ 1 ];
value[ index + 2 ] = matrix.elements[ 2 ];
value[ index + 4 ] = matrix.elements[ 3 ];
value[ index + 5 ] = matrix.elements[ 4 ];
value[ index + 6 ] = matrix.elements[ 5 ];
value[ index + 8 ] = matrix.elements[ 6 ];
value[ index + 9 ] = matrix.elements[ 7 ];
value[ index + 10 ] = matrix.elements[ 8 ];
value[ index + 15 ] = 1;
}
} else if ( elementType === 'mat4' ) {
for ( let i = 0; i < array.length; i ++ ) {
const index = i * 16;
const matrix = array[ i ];
// use a distinct inner index to avoid shadowing the outer loop variable
for ( let j = 0; j < matrix.elements.length; j ++ ) {
value[ index + j ] = matrix.elements[ j ];
}
}
} else {
for ( let i = 0; i < array.length; i ++ ) {
const index = i * 4;
const vector = array[ i ];
value[ index ] = vector.x;
value[ index + 1 ] = vector.y;
value[ index + 2 ] = vector.z || 0;
value[ index + 3 ] = vector.w || 0;
}
}
}
/**
* Implements the value buffer creation based on the array data. A typed
* array sized by the padded element length is allocated and the buffer
* count/type of the underlying buffer node are configured.
*
* @param {NodeBuilder} builder - A reference to the current node builder.
* @return {null}
*/
setup( builder ) {
const length = this.array.length;
const elementType = this.elementType;
let arrayType = Float32Array;
const paddedType = this.paddedType;
const paddedElementLength = builder.getTypeLength( paddedType );
if ( elementType.charAt( 0 ) === 'i' ) arrayType = Int32Array;
if ( elementType.charAt( 0 ) === 'u' ) arrayType = Uint32Array;
this.value = new arrayType( length * paddedElementLength );
this.bufferCount = length;
this.bufferType = paddedType;
return super.setup( builder );
}
/**
* Overwrites the default `element()` method to provide element access
* based on {@link module:UniformArrayNode~UniformArrayNode}.
*
* @param {IndexNode} indexNode - The index node.
* @return {UniformArrayElementNode}
*/
element( indexNode ) {
return nodeObject( new UniformArrayElementNode( this, nodeObject( indexNode ) ) );
}
}
/**
* TSL function for creating an uniform array node.
*
* @function
* @param {Array<Any>} values - Array-like data.
* @param {String?} nodeType - The data type of the array elements. If not provided, it is inferred from the first element.
* @returns {UniformArrayNode}
*/
const uniformArray = ( values, nodeType ) => nodeObject( new UniformArrayNode( values, nodeType ) );
/**
* @function
* @deprecated since r168. Use {@link uniformArray} instead.
*
* @param {Array<Any>} values - Array-like data.
* @param {String} nodeType - The data type of the array elements.
* @returns {UniformArrayNode}
*/
const uniforms = ( values, nodeType ) => { // @deprecated, r168
console.warn( 'TSL.UniformArrayNode: uniforms() has been renamed to uniformArray().' );
return uniformArray( values, nodeType );
};
/** @module Camera **/
/**
 * TSL object that represents the current `index` value of the camera when an `ArrayCamera` is used.
 * Shared across renders via its own uniform group and passed to the fragment stage as a varying.
 *
 * @type {UniformNode<uint>}
 */
const cameraIndex = /*@__PURE__*/ uniform( 0, 'uint' ).setGroup( sharedUniformGroup( 'cameraIndex' ) ).toVarying( 'v_cameraIndex' );
/**
 * TSL object that represents the `near` value of the camera used for the current render.
 * Updated once per render from `camera.near`.
 *
 * @type {UniformNode<float>}
 */
const cameraNear = /*@__PURE__*/ uniform( 'float' ).label( 'cameraNear' ).setGroup( renderGroup ).onRenderUpdate( ( { camera } ) => camera.near );
/**
 * TSL object that represents the `far` value of the camera used for the current render.
 * Updated once per render from `camera.far`.
 *
 * @type {UniformNode<float>}
 */
const cameraFar = /*@__PURE__*/ uniform( 'float' ).label( 'cameraFar' ).setGroup( renderGroup ).onRenderUpdate( ( { camera } ) => camera.far );
/**
 * TSL object that represents the projection matrix of the camera used for the current render.
 * For an `ArrayCamera` the matrices of all sub-cameras are uploaded as a uniform array and
 * selected via {@link cameraIndex}; otherwise a single per-render uniform is used.
 *
 * @type {UniformNode<mat4>}
 */
const cameraProjectionMatrix = /*@__PURE__*/ ( Fn( ( { camera } ) => {

	if ( camera.isArrayCamera && camera.cameras.length > 0 ) {

		// One projection matrix per sub-camera, indexed by the active camera index.
		const matrices = [];

		for ( const subCamera of camera.cameras ) {

			matrices.push( subCamera.projectionMatrix );

		}

		const cameraProjectionMatrices = uniformArray( matrices ).setGroup( renderGroup ).label( 'cameraProjectionMatrices' );

		return cameraProjectionMatrices.element( cameraIndex ).toVar( 'cameraProjectionMatrix' );

	}

	return uniform( 'mat4' ).label( 'cameraProjectionMatrix' ).setGroup( renderGroup ).onRenderUpdate( ( { camera } ) => camera.projectionMatrix );

} ).once() )();
/**
 * TSL object that represents the inverse projection matrix of the camera used for the current render.
 * Updated once per render from `camera.projectionMatrixInverse`.
 *
 * @type {UniformNode<mat4>}
 */
const cameraProjectionMatrixInverse = /*@__PURE__*/ uniform( 'mat4' ).label( 'cameraProjectionMatrixInverse' ).setGroup( renderGroup ).onRenderUpdate( ( { camera } ) => camera.projectionMatrixInverse );
/**
 * TSL object that represents the view matrix of the camera used for the current render.
 * For an `ArrayCamera` the inverse world matrices of all sub-cameras are uploaded as a
 * uniform array and selected via {@link cameraIndex}; otherwise a single per-render uniform is used.
 *
 * @type {UniformNode<mat4>}
 */
const cameraViewMatrix = /*@__PURE__*/ ( Fn( ( { camera } ) => {

	if ( camera.isArrayCamera && camera.cameras.length > 0 ) {

		// One view matrix (inverse world matrix) per sub-camera.
		const matrices = [];

		for ( const subCamera of camera.cameras ) {

			matrices.push( subCamera.matrixWorldInverse );

		}

		const cameraViewMatrices = uniformArray( matrices ).setGroup( renderGroup ).label( 'cameraViewMatrices' );

		return cameraViewMatrices.element( cameraIndex ).toVar( 'cameraViewMatrix' );

	}

	return uniform( 'mat4' ).label( 'cameraViewMatrix' ).setGroup( renderGroup ).onRenderUpdate( ( { camera } ) => camera.matrixWorldInverse );

} ).once() )();
/**
 * TSL object that represents the world matrix of the camera used for the current render.
 * Updated once per render from `camera.matrixWorld`.
 *
 * @type {UniformNode<mat4>}
 */
const cameraWorldMatrix = /*@__PURE__*/ uniform( 'mat4' ).label( 'cameraWorldMatrix' ).setGroup( renderGroup ).onRenderUpdate( ( { camera } ) => camera.matrixWorld );
/**
 * TSL object that represents the normal matrix of the camera used for the current render.
 * Updated once per render from `camera.normalMatrix`.
 *
 * @type {UniformNode<mat3>}
 */
const cameraNormalMatrix = /*@__PURE__*/ uniform( 'mat3' ).label( 'cameraNormalMatrix' ).setGroup( renderGroup ).onRenderUpdate( ( { camera } ) => camera.normalMatrix );
/**
 * TSL object that represents the position in world space of the camera used for the current render.
 * Extracted once per render from the translation part of `camera.matrixWorld`.
 *
 * @type {UniformNode<vec3>}
 */
const cameraPosition = /*@__PURE__*/ uniform( new Vector3() ).label( 'cameraPosition' ).setGroup( renderGroup ).onRenderUpdate( ( { camera }, self ) => self.value.setFromMatrixPosition( camera.matrixWorld ) );
/** @module Object3DNode **/
/**
 * This node can be used to access transformation related metrics of 3D objects.
 * Depending on the selected scope, a different metric is represented as a uniform
 * in the shader. The following scopes are supported:
 *
 * - `POSITION`: The object's position in world space.
 * - `VIEW_POSITION`: The object's position in view/camera space.
 * - `DIRECTION`: The object's direction in world space.
 * - `SCALE`: The object's scale in world space.
 * - `WORLD_MATRIX`: The object's matrix in world space.
 *
 * @augments Node
 */
class Object3DNode extends Node {
static get type() {
return 'Object3DNode';
}
/**
 * Constructs a new object 3D node.
 *
 * @param {('position'|'viewPosition'|'direction'|'scale'|'worldMatrix')} scope - The node represents a different type of transformation depending on the scope.
 * @param {Object3D?} [object3d=null] - The 3D object.
 */
constructor( scope, object3d = null ) {
super();
/**
 * The node reports a different type of transformation depending on the scope.
 *
 * @type {('position'|'viewPosition'|'direction'|'scale'|'worldMatrix')}
 */
this.scope = scope;
/**
 * The 3D object.
 *
 * @type {Object3D?}
 * @default null
 */
this.object3d = object3d;
/**
 * Overwritten since this type of node is updated per object.
 *
 * @type {String}
 * @default 'object'
 */
this.updateType = NodeUpdateType.OBJECT;
/**
 * Holds the value of the node as a uniform.
 *
 * @private
 * @type {UniformNode}
 */
this._uniformNode = new UniformNode( null );
}
/**
 * Overwritten since the node type is inferred from the scope.
 * `WORLD_MATRIX` maps to `mat4`, all vector scopes map to `vec3`.
 *
 * NOTE(review): an unknown scope yields `undefined` — presumably only the
 * predefined scope constants are ever used; verify against callers.
 *
 * @return {String} The node type.
 */
getNodeType() {
const scope = this.scope;
if ( scope === Object3DNode.WORLD_MATRIX ) {
return 'mat4';
} else if ( scope === Object3DNode.POSITION || scope === Object3DNode.VIEW_POSITION || scope === Object3DNode.DIRECTION || scope === Object3DNode.SCALE ) {
return 'vec3';
}
}
/**
 * Updates the uniform value depending on the scope. Vector scopes lazily
 * allocate a `Vector3` the first time they are updated.
 *
 * @param {NodeFrame} frame - The current node frame.
 */
update( frame ) {
const object = this.object3d;
const uniformNode = this._uniformNode;
const scope = this.scope;
if ( scope === Object3DNode.WORLD_MATRIX ) {
uniformNode.value = object.matrixWorld;
} else if ( scope === Object3DNode.POSITION ) {
uniformNode.value = uniformNode.value || new Vector3();
uniformNode.value.setFromMatrixPosition( object.matrixWorld );
} else if ( scope === Object3DNode.SCALE ) {
uniformNode.value = uniformNode.value || new Vector3();
uniformNode.value.setFromMatrixScale( object.matrixWorld );
} else if ( scope === Object3DNode.DIRECTION ) {
uniformNode.value = uniformNode.value || new Vector3();
object.getWorldDirection( uniformNode.value );
} else if ( scope === Object3DNode.VIEW_POSITION ) {
// View position additionally requires the camera of the current frame.
const camera = frame.camera;
uniformNode.value = uniformNode.value || new Vector3();
uniformNode.value.setFromMatrixPosition( object.matrixWorld );
uniformNode.value.applyMatrix4( camera.matrixWorldInverse );
}
}
/**
 * Generates the code snippet of the uniform node. The node type of the uniform
 * node also depends on the selected scope.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The generated code snippet.
 */
generate( builder ) {
const scope = this.scope;
if ( scope === Object3DNode.WORLD_MATRIX ) {
this._uniformNode.nodeType = 'mat4';
} else if ( scope === Object3DNode.POSITION || scope === Object3DNode.VIEW_POSITION || scope === Object3DNode.DIRECTION || scope === Object3DNode.SCALE ) {
this._uniformNode.nodeType = 'vec3';
}
return this._uniformNode.build( builder );
}
/**
 * Serializes the scope of this node into the given JSON object.
 *
 * @param {Object} data - The JSON object the node is serialized to.
 */
serialize( data ) {
super.serialize( data );
data.scope = this.scope;
}
/**
 * Deserializes the scope of this node from the given JSON object.
 *
 * @param {Object} data - The JSON object the node is deserialized from.
 */
deserialize( data ) {
super.deserialize( data );
this.scope = data.scope;
}
}
// Scope constants for Object3DNode. Each value selects which metric of the
// 3D object the node exposes as a uniform.
Object3DNode.WORLD_MATRIX = 'worldMatrix';
Object3DNode.POSITION = 'position';
Object3DNode.SCALE = 'scale';
Object3DNode.VIEW_POSITION = 'viewPosition';
Object3DNode.DIRECTION = 'direction';
/**
 * TSL function for creating an object 3D node that represents the object's direction in world space.
 *
 * @function
 * @param {Object3D?} [object3d=null] - The 3D object.
 * @returns {Object3DNode<vec3>}
 */
const objectDirection = /*@__PURE__*/ nodeProxy( Object3DNode, Object3DNode.DIRECTION );
/**
 * TSL function for creating an object 3D node that represents the object's world matrix.
 *
 * @function
 * @param {Object3D?} [object3d=null] - The 3D object.
 * @returns {Object3DNode<mat4>}
 */
const objectWorldMatrix = /*@__PURE__*/ nodeProxy( Object3DNode, Object3DNode.WORLD_MATRIX );
/**
 * TSL function for creating an object 3D node that represents the object's position in world space.
 *
 * @function
 * @param {Object3D?} [object3d=null] - The 3D object.
 * @returns {Object3DNode<vec3>}
 */
const objectPosition = /*@__PURE__*/ nodeProxy( Object3DNode, Object3DNode.POSITION );
/**
 * TSL function for creating an object 3D node that represents the object's scale in world space.
 *
 * @function
 * @param {Object3D?} [object3d=null] - The 3D object.
 * @returns {Object3DNode<vec3>}
 */
const objectScale = /*@__PURE__*/ nodeProxy( Object3DNode, Object3DNode.SCALE );
/**
 * TSL function for creating an object 3D node that represents the object's position in view/camera space.
 *
 * @function
 * @param {Object3D?} [object3d=null] - The 3D object.
 * @returns {Object3DNode<vec3>}
 */
const objectViewPosition = /*@__PURE__*/ nodeProxy( Object3DNode, Object3DNode.VIEW_POSITION );
/** @module ModelNode **/
/**
 * This type of node is a specialized version of `Object3DNode`
 * with larger set of model related metrics. Unlike `Object3DNode`,
 * `ModelNode` extracts the reference to the 3D object from the
 * current node frame state.
 *
 * The redundant constructor that only forwarded `scope` to the parent
 * class has been removed — the implicit default constructor is identical.
 *
 * @augments module:Object3DNode~Object3DNode
 */
class ModelNode extends Object3DNode {

	static get type() {

		return 'ModelNode';

	}

	/**
	 * Extracts the model reference from the frame state and then
	 * updates the uniform value depending on the scope.
	 *
	 * @param {NodeFrame} frame - The current node frame.
	 */
	update( frame ) {

		this.object3d = frame.object;

		super.update( frame );

	}

}
/**
 * TSL object that represents the object's direction in world space.
 *
 * @type {ModelNode<vec3>}
 */
const modelDirection = /*@__PURE__*/ nodeImmutable( ModelNode, ModelNode.DIRECTION );
/**
 * TSL object that represents the object's world matrix.
 *
 * @type {ModelNode<mat4>}
 */
const modelWorldMatrix = /*@__PURE__*/ nodeImmutable( ModelNode, ModelNode.WORLD_MATRIX );
/**
 * TSL object that represents the object's position in world space.
 *
 * @type {ModelNode<vec3>}
 */
const modelPosition = /*@__PURE__*/ nodeImmutable( ModelNode, ModelNode.POSITION );
/**
 * TSL object that represents the object's scale in world space.
 *
 * @type {ModelNode<vec3>}
 */
const modelScale = /*@__PURE__*/ nodeImmutable( ModelNode, ModelNode.SCALE );
/**
 * TSL object that represents the object's position in view/camera space.
 *
 * @type {ModelNode<vec3>}
 */
const modelViewPosition = /*@__PURE__*/ nodeImmutable( ModelNode, ModelNode.VIEW_POSITION );
/**
 * TSL object that represents the object's normal matrix.
 * Recomputed per object from the object's world matrix.
 *
 * @type {UniformNode<mat3>}
 */
const modelNormalMatrix = /*@__PURE__*/ uniform( new Matrix3() ).onObjectUpdate( ( { object }, self ) => self.value.getNormalMatrix( object.matrixWorld ) );
/**
 * TSL object that represents the object's inverse world matrix.
 * Recomputed per object by inverting a copy of the object's world matrix.
 *
 * @type {UniformNode<mat4>}
 */
const modelWorldMatrixInverse = /*@__PURE__*/ uniform( new Matrix4() ).onObjectUpdate( ( { object }, self ) => self.value.copy( object.matrixWorld ).invert() );
/**
 * TSL object that represents the object's model view matrix.
 * Can be overridden per renderer via `renderer.nodes.modelViewMatrix`;
 * otherwise falls back to {@link mediumpModelViewMatrix}.
 *
 * @type {Node<mat4>}
 */
const modelViewMatrix = /*@__PURE__*/ ( Fn( ( builder ) => {
return builder.renderer.nodes.modelViewMatrix || mediumpModelViewMatrix;
} ).once() )().toVar( 'modelViewMatrix' );
// GPU Precision
/**
 * TSL object that represents the object's model view in `mediump` precision.
 * Computed in the shader as `cameraViewMatrix * modelWorldMatrix`.
 *
 * @type {Node<mat4>}
 */
const mediumpModelViewMatrix = /*@__PURE__*/ cameraViewMatrix.mul( modelWorldMatrix );
// CPU Precision
/**
 * TSL object that represents the object's model view in `highp` precision
 * which is achieved by computing the matrix in JS and not in the shader.
 * The multiplication happens per object on the CPU via `onObjectUpdate`.
 *
 * @type {Node<mat4>}
 */
const highpModelViewMatrix = /*@__PURE__*/ ( Fn( ( builder ) => {
// Flag the context so the normal-matrix node below can skip a redundant multiply.
builder.context.isHighPrecisionModelViewMatrix = true;
return uniform( 'mat4' ).onObjectUpdate( ( { object, camera } ) => {
return object.modelViewMatrix.multiplyMatrices( camera.matrixWorldInverse, object.matrixWorld );
} );
} ).once() )().toVar( 'highpModelViewMatrix' );
/**
 * TSL object that represents the object's model normal view in `highp` precision
 * which is achieved by computing the matrix in JS and not in the shader.
 *
 * @type {Node<mat3>}
 */
const highpModelNormalViewMatrix = /*@__PURE__*/ ( Fn( ( builder ) => {
const isHighPrecisionModelViewMatrix = builder.context.isHighPrecisionModelViewMatrix;
return uniform( 'mat3' ).onObjectUpdate( ( { object, camera } ) => {
// If highpModelViewMatrix did not already update modelViewMatrix, do it here.
if ( isHighPrecisionModelViewMatrix !== true ) {
object.modelViewMatrix.multiplyMatrices( camera.matrixWorldInverse, object.matrixWorld );
}
return object.normalMatrix.getNormalMatrix( object.modelViewMatrix );
} );
} ).once() )().toVar( 'highpModelNormalViewMatrix' );
/** @module Position **/
/**
 * TSL object that represents the position attribute of the current rendered object.
 *
 * @type {AttributeNode<vec3>}
 */
const positionGeometry = /*@__PURE__*/ attribute( 'position', 'vec3' );
/**
 * TSL object that represents the vertex position in local space of the current rendered object.
 *
 * @type {AttributeNode<vec3>}
 */
const positionLocal = /*@__PURE__*/ positionGeometry.toVarying( 'positionLocal' );
/**
 * TSL object that represents the previous vertex position in local space of the current rendered object.
 * Used in context of {@link module:VelocityNode~VelocityNode} for rendering motion vectors.
 *
 * @type {AttributeNode<vec3>}
 */
const positionPrevious = /*@__PURE__*/ positionGeometry.toVarying( 'positionPrevious' );
/**
 * TSL object that represents the vertex position in world space of the current rendered object.
 * Computed as `modelWorldMatrix * positionLocal`.
 *
 * @type {VaryingNode<vec3>}
 */
const positionWorld = /*@__PURE__*/ modelWorldMatrix.mul( positionLocal ).xyz.toVarying( 'v_positionWorld' ).context( { needsPositionReassign: true } );
/**
 * TSL object that represents the position world direction of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const positionWorldDirection = /*@__PURE__*/ positionLocal.transformDirection( modelWorldMatrix ).toVarying( 'v_positionWorldDirection' ).normalize().toVar( 'positionWorldDirection' ).context( { needsPositionReassign: true } );
/**
 * TSL object that represents the vertex position in view space of the current rendered object.
 * The actual computation is delegated to the builder context via `setupPositionView()`.
 *
 * @type {VaryingNode<vec3>}
 */
const positionView = /*@__PURE__*/ ( Fn( ( builder ) => {
return builder.context.setupPositionView();
}, 'vec3' ).once() )().toVarying( 'v_positionView' ).context( { needsPositionReassign: true } );
/**
 * TSL object that represents the position view direction of the current rendered object.
 * The negated, normalized view-space position (direction towards the camera).
 *
 * @type {VaryingNode<vec3>}
 */
const positionViewDirection = /*@__PURE__*/ positionView.negate().toVarying( 'v_positionViewDirection' ).normalize().toVar( 'positionViewDirection' );
/** @module FrontFacingNode **/
/**
 * This node can be used to evaluate whether a primitive is front or back facing.
 *
 * @augments Node
 */
class FrontFacingNode extends Node {

	static get type() {

		return 'FrontFacingNode';

	}

	/**
	 * Constructs a new front facing node.
	 */
	constructor() {

		super( 'bool' );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isFrontFacingNode = true;

	}

	/**
	 * Generates the code snippet of the node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The generated code snippet.
	 */
	generate( builder ) {

		const renderer = builder.renderer;
		const material = builder.material;

		// In the WebGL coordinate system, back-side materials are known
		// to produce back-facing primitives, so a constant is sufficient.
		if ( renderer.coordinateSystem === WebGLCoordinateSystem && material.side === BackSide ) {

			return 'false';

		}

		return builder.getFrontFacing();

	}

}
/**
 * TSL object that represents whether a primitive is front or back facing
 *
 * @type {FrontFacingNode<bool>}
 */
const frontFacing = /*@__PURE__*/ nodeImmutable( FrontFacingNode );
/**
 * TSL object that represents the front facing status as a number instead of a bool.
 * `1` means front facing, `-1` means back facing (computed as `frontFacing * 2 - 1`).
 *
 * @type {Node<float>}
 */
const faceDirection = /*@__PURE__*/ float( frontFacing ).mul( 2.0 ).sub( 1.0 );
/** @module Normal **/
/**
 * TSL object that represents the normal attribute of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const normalGeometry = /*@__PURE__*/ attribute( 'normal', 'vec3' );
/**
 * TSL object that represents the vertex normal in local space of the current rendered object.
 * Falls back to a constant up vector (with a warning) when the geometry has no normal attribute.
 *
 * @type {Node<vec3>}
 */
const normalLocal = /*@__PURE__*/ ( Fn( ( builder ) => {

	if ( builder.geometry.hasAttribute( 'normal' ) ) {

		return normalGeometry;

	}

	console.warn( 'TSL.NormalNode: Vertex attribute "normal" not found on geometry.' );

	return vec3( 0, 1, 0 );

}, 'vec3' ).once() )().toVar( 'normalLocal' );
/**
 * TSL object that represents the flat vertex normal in view space of the current rendered object.
 * Derived from screen-space derivatives of the view position (cross of dFdx and dFdy).
 *
 * @type {Node<vec3>}
 */
const normalFlat = /*@__PURE__*/ positionView.dFdx().cross( positionView.dFdy() ).normalize().toVar( 'normalFlat' );
/**
 * TSL object that represents the vertex normal in view space of the current rendered object.
 * Uses the derivative-based flat normal when the material requests flat shading, otherwise
 * the attribute normal transformed into view space and interpolated as a varying.
 *
 * @type {Node<vec3>}
 */
const normalView = /*@__PURE__*/ ( Fn( ( builder ) => {

	const node = builder.material.flatShading === true
		? normalFlat
		: varying( transformNormalToView( normalLocal ), 'v_normalView' ).normalize();

	return node;

}, 'vec3' ).once() )().toVar( 'normalView' );
/**
 * TSL object that represents the vertex normal in world space of the current rendered object.
 * Obtained by transforming the view-space normal back with the camera view matrix.
 *
 * @type {Node<vec3>}
 */
const normalWorld = /*@__PURE__*/ varying( normalView.transformDirection( cameraViewMatrix ), 'v_normalWorld' ).normalize().toVar( 'normalWorld' );
/**
 * TSL object that represents the transformed vertex normal in view space of the current rendered object.
 * Multiplied by `faceDirection` so back-facing primitives receive a flipped normal.
 *
 * @type {Node<vec3>}
 */
const transformedNormalView = /*@__PURE__*/ ( Fn( ( builder ) => {
// Use getUV context to avoid side effects from nodes overwriting getUV in the context (e.g. EnvironmentNode)
return builder.context.setupNormal().context( { getUV: null } );
}, 'vec3' ).once() )().mul( faceDirection ).toVar( 'transformedNormalView' );
/**
 * TSL object that represents the transformed vertex normal in world space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const transformedNormalWorld = /*@__PURE__*/ transformedNormalView.transformDirection( cameraViewMatrix ).toVar( 'transformedNormalWorld' );
/**
 * TSL object that represents the transformed clearcoat vertex normal in view space of the current rendered object.
 * Like `transformedNormalView`, it is flipped for back-facing primitives via `faceDirection`.
 *
 * @type {Node<vec3>}
 */
const transformedClearcoatNormalView = /*@__PURE__*/ ( Fn( ( builder ) => {
// Use getUV context to avoid side effects from nodes overwriting getUV in the context (e.g. EnvironmentNode)
return builder.context.setupClearcoatNormal().context( { getUV: null } );
}, 'vec3' ).once() )().mul( faceDirection ).toVar( 'transformedClearcoatNormalView' );
/**
 * Transforms the normal with the given matrix.
 *
 * @function
 * @param {Node<vec3>} normal - The normal.
 * @param {Node<mat3>} [matrix=modelWorldMatrix] - The matrix.
 * @return {Node<vec3>} The transformed normal.
 */
const transformNormal = /*@__PURE__*/ Fn( ( [ normal, matrix = modelWorldMatrix ] ) => {

	const m = mat3( matrix );

	// Compensate for (non-uniform) scale by dividing each component by the
	// squared length of the corresponding matrix column before transforming.
	const squaredScale = vec3( m[ 0 ].dot( m[ 0 ] ), m[ 1 ].dot( m[ 1 ] ), m[ 2 ].dot( m[ 2 ] ) );

	return m.mul( normal.div( squaredScale ) ).xyz;

} );
/**
 * Transforms the given normal from local to view space. When the renderer
 * provides a dedicated `modelNormalViewMatrix` node, it is used directly;
 * otherwise the normal is taken through the model normal matrix and the
 * camera view matrix.
 *
 * @function
 * @param {Node<vec3>} normal - The normal.
 * @param {NodeBuilder} builder - The current node builder.
 * @return {Node<vec3>} The transformed normal.
 */
const transformNormalToView = /*@__PURE__*/ Fn( ( [ normal ], builder ) => {

	const modelNormalViewMatrix = builder.renderer.nodes.modelNormalViewMatrix;

	if ( modelNormalViewMatrix !== null ) {

		return modelNormalViewMatrix.transformDirection( normal );

	}

	return cameraViewMatrix.transformDirection( modelNormalMatrix.mul( normal ) );

} );
/** @module MaterialProperties **/
/**
 * TSL object that represents the refraction ratio of the material used for rendering the current object.
 * Updated per render from `material.refractionRatio`.
 *
 * @type {UniformNode<float>}
 */
const materialRefractionRatio = /*@__PURE__*/ uniform( 0 ).onReference( ( { material } ) => material ).onRenderUpdate( ( { material } ) => material.refractionRatio );
/** @module ReflectVector **/
/**
 * The reflect vector in view space.
 *
 * @type {Node<vec3>}
 */
const reflectView = /*@__PURE__*/ positionViewDirection.negate().reflect( transformedNormalView );
/**
 * The refract vector in view space, using the material's refraction ratio.
 *
 * @type {Node<vec3>}
 */
const refractView = /*@__PURE__*/ positionViewDirection.negate().refract( transformedNormalView, materialRefractionRatio );
/**
 * Used for sampling cube maps when using cube reflection mapping.
 *
 * @type {Node<vec3>}
 */
const reflectVector = /*@__PURE__*/ reflectView.transformDirection( cameraViewMatrix ).toVar( 'reflectVector' );
/**
 * Used for sampling cube maps when using cube refraction mapping.
 *
 * @type {Node<vec3>}
 */
const refractVector = /*@__PURE__*/ refractView.transformDirection( cameraViewMatrix ).toVar( 'refractVector' ); // fixed: label was copy-pasted as 'reflectVector', colliding with the reflection vector's variable name
/** @module CubeTextureNode **/
/**
 * This type of uniform node represents a cube texture.
 *
 * @augments module:TextureNode~TextureNode
 */
class CubeTextureNode extends TextureNode {
static get type() {
return 'CubeTextureNode';
}
/**
 * Constructs a new cube texture node.
 *
 * @param {CubeTexture} value - The cube texture.
 * @param {Node<vec3>?} [uvNode=null] - The uv node.
 * @param {Node<int>?} [levelNode=null] - The level node.
 * @param {Node<float>?} [biasNode=null] - The bias node.
 */
constructor( value, uvNode = null, levelNode = null, biasNode = null ) {
super( value, uvNode, levelNode, biasNode );
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isCubeTextureNode = true;
}
/**
 * Overwrites the default implementation to return a fixed value `'cubeTexture'`.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The input type.
 */
getInputType( /*builder*/ ) {
return 'cubeTexture';
}
/**
 * Returns a default uvs based on the mapping type of the cube texture.
 * Only cube reflection and cube refraction mappings are supported; any
 * other mapping logs an error and returns a zero vector.
 *
 * @return {Node<vec3>} The default uv attribute.
 */
getDefaultUV() {
const texture = this.value;
if ( texture.mapping === CubeReflectionMapping ) {
return reflectVector;
} else if ( texture.mapping === CubeRefractionMapping ) {
return refractVector;
} else {
console.error( 'THREE.CubeTextureNode: Mapping "%s" not supported.', texture.mapping );
return vec3( 0, 0, 0 );
}
}
/**
 * Overwritten with an empty implementation since the `updateMatrix` flag is ignored
 * for cube textures. The uv transformation matrix is not applied to cube textures.
 *
 * @param {Boolean} value - The update toggle.
 */
setUpdateMatrix( /*updateMatrix*/ ) { } // Ignore .updateMatrix for CubeTextureNode
/**
 * Setups the uv node. Depending on the backend as well as the texture type, it might be necessary
 * to modify the uv node for correct sampling. The x component is negated except for
 * render-target textures in the WebGL coordinate system.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @param {Node} uvNode - The uv node to setup.
 * @return {Node} The updated uv node.
 */
setupUV( builder, uvNode ) {
const texture = this.value;
if ( builder.renderer.coordinateSystem === WebGPUCoordinateSystem || ! texture.isRenderTargetTexture ) {
return vec3( uvNode.x.negate(), uvNode.yz );
} else {
return uvNode;
}
}
/**
 * Generates the uv code snippet.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @param {Node} cubeUV - The uv node to generate code for.
 * @return {String} The generated code snippet.
 */
generateUV( builder, cubeUV ) {
return cubeUV.build( builder, 'vec3' );
}
}
/**
 * TSL function for creating a cube texture node.
 *
 * @function
 * @param {CubeTexture} value - The cube texture.
 * @param {Node<vec3>?} [uvNode=null] - The uv node.
 * @param {Node<int>?} [levelNode=null] - The level node.
 * @param {Node<float>?} [biasNode=null] - The bias node.
 * @returns {CubeTextureNode}
 */
const cubeTexture = /*@__PURE__*/ nodeProxy( CubeTextureNode );
// TODO: Avoid duplicated code and use only ReferenceBaseNode or ReferenceNode
/** @module ReferenceNode **/
/**
 * This class is only relevant if the referenced property is array-like.
 * In this case, `ReferenceElementNode` allows to refer to a specific
 * element inside the data structure via an index.
 *
 * @augments ArrayElementNode
 */
class ReferenceElementNode extends ArrayElementNode {
static get type() {
return 'ReferenceElementNode';
}
/**
 * Constructs a new reference element node.
 *
 * @param {ReferenceNode?} referenceNode - The reference node.
 * @param {Node} indexNode - The index node that defines the element access.
 */
constructor( referenceNode, indexNode ) {
super( referenceNode, indexNode );
/**
 * Similar to {@link module:ReferenceNode~ReferenceNode#reference}, an additional
 * property references to the current node.
 *
 * @type {ReferenceNode?}
 * @default null
 */
this.referenceNode = referenceNode;
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isReferenceElementNode = true;
}
/**
 * This method is overwritten since the node type is inferred from
 * the uniform type of the reference node.
 *
 * @return {String} The node type.
 */
getNodeType() {
return this.referenceNode.uniformType;
}
/**
 * Generates the code snippet of the element access and converts the
 * result from the array type to the element type if necessary.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The generated code snippet.
 */
generate( builder ) {
const snippet = super.generate( builder );
const arrayType = this.referenceNode.getNodeType();
const elementType = this.getNodeType();
return builder.format( snippet, arrayType, elementType );
}
}
/**
 * This type of node establishes a reference to a property of another object.
 * In this way, the value of the node is automatically linked to the value of
 * referenced object. Reference nodes internally represent the linked value
 * as a uniform.
 *
 * @augments Node
 */
class ReferenceNode extends Node {
static get type() {
return 'ReferenceNode';
}
/**
 * Constructs a new reference node.
 *
 * @param {String} property - The name of the property the node refers to.
 * @param {String} uniformType - The uniform type that should be used to represent the property value.
 * @param {Object?} [object=null] - The object the property belongs to.
 * @param {Number?} [count=null] - When the linked property is an array-like, this parameter defines its length.
 */
constructor( property, uniformType, object = null, count = null ) {
super();
/**
 * The name of the property the node refers to.
 *
 * @type {String}
 */
this.property = property;
/**
 * The uniform type that should be used to represent the property value.
 *
 * @type {String}
 */
this.uniformType = uniformType;
/**
 * The object the property belongs to.
 *
 * @type {Object?}
 * @default null
 */
this.object = object;
/**
 * When the linked property is an array, this parameter defines its length.
 *
 * @type {Number?}
 * @default null
 */
this.count = count;
/**
 * The property name might have dots so nested properties can be referred.
 * The hierarchy of the names is stored inside this array.
 *
 * @type {Array<String>}
 */
this.properties = property.split( '.' );
/**
 * Points to the current referred object. This property exists next to {@link module:ReferenceNode~ReferenceNode#object}
 * since the final reference might be updated from calling code.
 *
 * @type {Object?}
 * @default null
 */
this.reference = object;
/**
 * The uniform node that holds the value of the reference node.
 * Created lazily in `setNodeType()`.
 *
 * @type {UniformNode?}
 * @default null
 */
this.node = null;
/**
 * The uniform group of the internal uniform.
 *
 * @type {UniformGroupNode?}
 * @default null
 */
this.group = null;
/**
 * An optional label of the internal uniform node.
 *
 * @type {String?}
 * @default null
 */
this.name = null;
/**
 * Overwritten since reference nodes are updated per object.
 *
 * @type {String}
 * @default 'object'
 */
this.updateType = NodeUpdateType.OBJECT;
}
/**
 * When the referred property is array-like, this method can be used
 * to access elements via an index node.
 *
 * @param {IndexNode} indexNode - indexNode.
 * @return {ReferenceElementNode} A reference to an element.
 */
element( indexNode ) {
return nodeObject( new ReferenceElementNode( this, nodeObject( indexNode ) ) );
}
/**
 * Sets the uniform group for this reference node.
 *
 * @param {UniformGroupNode} group - The uniform group to set.
 * @return {ReferenceNode} A reference to this node.
 */
setGroup( group ) {
this.group = group;
return this;
}
/**
 * Sets the label for the internal uniform.
 *
 * @param {String} name - The label to set.
 * @return {ReferenceNode} A reference to this node.
 */
label( name ) {
this.name = name;
return this;
}
/**
 * Sets the node type which automatically defines the internal
 * uniform type. The kind of internal node depends on the referenced
 * value: buffer for counted arrays, uniform array for plain arrays,
 * texture/cubeTexture for texture types, plain uniform otherwise.
 *
 * @param {String} uniformType - The type to set.
 */
setNodeType( uniformType ) {
let node = null;
if ( this.count !== null ) {
node = buffer( null, uniformType, this.count );
} else if ( Array.isArray( this.getValueFromReference() ) ) {
node = uniformArray( null, uniformType );
} else if ( uniformType === 'texture' ) {
node = texture( null );
} else if ( uniformType === 'cubeTexture' ) {
node = cubeTexture( null );
} else {
node = uniform( null, uniformType );
}
if ( this.group !== null ) {
node.setGroup( this.group );
}
if ( this.name !== null ) node.label( this.name );
this.node = node.getSelf();
}
/**
 * This method is overwritten since the node type is inferred from
 * the type of the reference node.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The node type.
 */
getNodeType( builder ) {
if ( this.node === null ) {
this.updateReference( builder );
this.updateValue();
}
return this.node.getNodeType( builder );
}
/**
 * Returns the property value from the given referred object.
 * Nested properties (dot-separated names) are resolved step by step.
 *
 * @param {Object} [object=this.reference] - The object to retrieve the property value from.
 * @return {Any} The value.
 */
getValueFromReference( object = this.reference ) {
const { properties } = this;
let value = object[ properties[ 0 ] ];
for ( let i = 1; i < properties.length; i ++ ) {
value = value[ properties[ i ] ];
}
return value;
}
/**
 * Allows to update the reference based on the given state. The state is only
 * evaluated when {@link module:ReferenceNode~ReferenceNode#object} is not set.
 *
 * @param {(NodeFrame|NodeBuilder)} state - The current state.
 * @return {Object} The updated reference.
 */
updateReference( state ) {
this.reference = this.object !== null ? this.object : state.object;
return this.reference;
}
/**
 * The output of the reference node is the internal uniform node.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {UniformNode} The output node.
 */
setup( /* builder */ ) {
this.updateValue();
return this.node;
}
/**
 * Overwritten to update the internal uniform value.
 *
 * @param {NodeFrame} frame - A reference to the current node frame.
 */
update( /*frame*/ ) {
this.updateValue();
}
/**
 * Retrieves the value from the referred object property and uses it
 * to update the internal uniform. Array values are assigned to the
 * node's `array` property, everything else to `value`.
 */
updateValue() {
if ( this.node === null ) this.setNodeType( this.uniformType );
const value = this.getValueFromReference();
if ( Array.isArray( value ) ) {
this.node.array = value;
} else {
this.node.value = value;
}
}
}
/**
 * TSL function for creating a reference node.
 *
 * @function
 * @param {String} name - The name of the property the node refers to.
 * @param {String} type - The uniform type that should be used to represent the property value.
 * @param {Object} object - The object the property belongs to.
 * @returns {ReferenceNode}
 */
const reference = ( name, type, object ) => nodeObject( new ReferenceNode( name, type, object ) );
/**
 * TSL function for creating a reference node. Use this function if you need a reference
 * to an array-like property that should be represented as a uniform buffer.
 *
 * @function
 * @param {String} name - The name of the property the node refers to.
 * @param {String} type - The uniform type that should be used to represent the property value.
 * @param {Number} count - The number of values inside the array-like object.
 * @param {Object} object - An array-like object the property belongs to.
 * @returns {ReferenceNode}
 */
const referenceBuffer = ( name, type, count, object ) => nodeObject( new ReferenceNode( name, type, object, count ) );
/** @module MaterialReferenceNode **/
/**
 * A specialized reference node for linking material properties with node values.
 * ```js
 * const opacityNode = materialReference( 'opacity', 'float', material );
 * ```
 * When changing `material.opacity`, the node value of `opacityNode` will
 * automatically be updated.
 *
 * @augments module:ReferenceNode~ReferenceNode
 */
class MaterialReferenceNode extends ReferenceNode {
	static get type() {
		return 'MaterialReferenceNode';
	}
	/**
	 * Constructs a new material reference node.
	 *
	 * @param {String} property - The name of the material property the node refers to.
	 * @param {String} inputType - The uniform type that should be used to represent the property value.
	 * @param {Material?} [material=null] - The material the property belongs to. When no material is set,
	 * the node refers to the material of the current rendered object.
	 */
	constructor( property, inputType, material = null ) {
		super( property, inputType, material );
		/**
		 * The material the property belongs to. When no material is set,
		 * the node refers to the material of the current rendered object.
		 *
		 * @type {Material?}
		 * @default null
		 */
		this.material = material;
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isMaterialReferenceNode = true;
	}
	/**
	 * Updates the reference based on the given state. The state is only evaluated
	 * when {@link module:MaterialReferenceNode~MaterialReferenceNode#material} is not set.
	 *
	 * @param {(NodeFrame|NodeBuilder)} state - The current state.
	 * @return {Object} The updated reference.
	 */
	updateReference( state ) {
		if ( this.material !== null ) {
			this.reference = this.material;
		} else {
			this.reference = state.material;
		}
		return this.reference;
	}
}
/**
 * TSL function for creating a material reference node.
 *
 * @function
 * @param {String} name - The name of the property the node refers to.
 * @param {String} type - The uniform type that should be used to represent the property value.
 * @param {Material?} [material=null] - The material the property belongs to.
 * When no material is set, the node refers to the material of the current rendered object.
 * @returns {MaterialReferenceNode}
 */
const materialReference = ( name, type, material = null ) => {
	return nodeObject( new MaterialReferenceNode( name, type, material ) );
};
/** @module Tangent **/
/**
 * TSL object that represents the tangent attribute of the current rendered object.
 *
 * @type {Node<vec4>}
 */
const tangentGeometry = /*@__PURE__*/ Fn( ( builder ) => {
	// Generate tangents on demand when the geometry does not provide them.
	const hasTangents = builder.geometry.hasAttribute( 'tangent' );
	if ( ! hasTangents ) {
		builder.geometry.computeTangents();
	}
	return attribute( 'tangent', 'vec4' );
} )();
/**
 * TSL object that represents the vertex tangent in local space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const tangentLocal = /*@__PURE__*/ tangentGeometry.xyz.toVar( 'tangentLocal' );
/**
 * TSL object that represents the vertex tangent in view space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
// The tangent is extended with w = 0 so the model-view transform applies only its
// rotation/scale part (no translation), as appropriate for a direction vector.
const tangentView = /*@__PURE__*/ modelViewMatrix.mul( vec4( tangentLocal, 0 ) ).xyz.toVarying( 'v_tangentView' ).normalize().toVar( 'tangentView' );
/**
 * TSL object that represents the vertex tangent in world space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
// Derived from the view-space tangent and re-normalized after interpolation.
const tangentWorld = /*@__PURE__*/ tangentView.transformDirection( cameraViewMatrix ).toVarying( 'v_tangentWorld' ).normalize().toVar( 'tangentWorld' );
/**
 * TSL object that represents the transformed vertex tangent in view space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const transformedTangentView = /*@__PURE__*/ tangentView.toVar( 'transformedTangentView' );
/**
 * TSL object that represents the transformed vertex tangent in world space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const transformedTangentWorld = /*@__PURE__*/ transformedTangentView.transformDirection( cameraViewMatrix ).normalize().toVar( 'transformedTangentWorld' );
/** @module Bitangent **/
// Scales the normal×tangent cross product by the tangent's w component
// (which encodes the handedness of the tangent basis) and returns a vec3.
const getBitangent = ( crossNormalTangent ) => crossNormalTangent.mul( tangentGeometry.w ).xyz;
/**
 * TSL object that represents the bitangent attribute of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const bitangentGeometry = /*@__PURE__*/ varying( getBitangent( normalGeometry.cross( tangentGeometry ) ), 'v_bitangentGeometry' ).normalize().toVar( 'bitangentGeometry' );
/**
 * TSL object that represents the vertex bitangent in local space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const bitangentLocal = /*@__PURE__*/ varying( getBitangent( normalLocal.cross( tangentLocal ) ), 'v_bitangentLocal' ).normalize().toVar( 'bitangentLocal' );
/**
 * TSL object that represents the vertex bitangent in view space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const bitangentView = /*@__PURE__*/ varying( getBitangent( normalView.cross( tangentView ) ), 'v_bitangentView' ).normalize().toVar( 'bitangentView' );
/**
 * TSL object that represents the vertex bitangent in world space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const bitangentWorld = /*@__PURE__*/ varying( getBitangent( normalWorld.cross( tangentWorld ) ), 'v_bitangentWorld' ).normalize().toVar( 'bitangentWorld' );
/**
 * TSL object that represents the transformed vertex bitangent in view space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const transformedBitangentView = /*@__PURE__*/ getBitangent( transformedNormalView.cross( transformedTangentView ) ).normalize().toVar( 'transformedBitangentView' );
/**
 * TSL object that represents the transformed vertex bitangent in world space of the current rendered object.
 *
 * @type {Node<vec3>}
 */
const transformedBitangentWorld = /*@__PURE__*/ transformedBitangentView.transformDirection( cameraViewMatrix ).normalize().toVar( 'transformedBitangentWorld' );
/** @module AccessorsUtils **/
/**
 * TSL object that represents the TBN matrix in view space.
 *
 * @type {Node<mat3>}
 */
const TBNViewMatrix = /*@__PURE__*/ mat3( tangentView, bitangentView, normalView );
/**
 * TSL object that represents the parallax direction.
 *
 * @type {Node<vec3>}
 */
const parallaxDirection = /*@__PURE__*/ positionViewDirection.mul( TBNViewMatrix )/*.normalize()*/;
/**
 * TSL function for computing parallax uv coordinates.
 *
 * @function
 * @param {Node<vec2>} uv - A uv node.
 * @param {Node<vec2>} scale - A scale node.
 * @returns {Node<vec2>} Parallax uv coordinates.
 */
const parallaxUV = ( uv, scale ) => uv.sub( parallaxDirection.mul( scale ) );
/**
 * TSL object that represents the bent normal in view space, computed from the
 * anisotropy direction and roughness of the current material.
 *
 * @type {Node<vec3>}
 */
const transformedBentNormalView = /*@__PURE__*/ ( () => {
// https://google.github.io/filament/Filament.md.html#lighting/imagebasedlights/anisotropy
let bentNormal = anisotropyB.cross( positionViewDirection );
bentNormal = bentNormal.cross( anisotropyB ).normalize();
// Blend between the bent normal and the regular normal depending on anisotropy strength and roughness.
bentNormal = mix( bentNormal, transformedNormalView, anisotropy.mul( roughness.oneMinus() ).oneMinus().pow2().pow2() ).normalize();
return bentNormal;
} )();
/** @module NormalMapNode **/
// Normal Mapping Without Precomputed Tangents
// http://www.thetenthplanet.de/archives/1180
// Builds a cotangent frame from screen-space derivatives of the view-space
// position and uv coordinates, then applies the tangent-space normal `mapN`.
const perturbNormal2Arb = /*@__PURE__*/ Fn( ( inputs ) => {
const { eye_pos, surf_norm, mapN, uv } = inputs;
// Screen-space derivatives of the view-space position and of the uv coordinates.
const q0 = eye_pos.dFdx();
const q1 = eye_pos.dFdy();
const st0 = uv.dFdx();
const st1 = uv.dFdy();
const N = surf_norm; // normalized
const q1perp = q1.cross( N );
const q0perp = N.cross( q0 );
// Tangent and bitangent of the derived (unnormalized) cotangent frame.
const T = q1perp.mul( st0.x ).add( q0perp.mul( st1.x ) );
const B = q1perp.mul( st0.y ).add( q0perp.mul( st1.y ) );
// Normalize by the larger of the two squared lengths; faceDirection is folded into
// the scale (presumably to flip the frame for back faces — confirm with its definition).
const det = T.dot( T ).max( B.dot( B ) );
const scale = faceDirection.mul( det.inverseSqrt() );
return add( T.mul( mapN.x, scale ), B.mul( mapN.y, scale ), N.mul( mapN.z ) ).normalize();
} );
/**
 * This class can be used for applying normals maps to materials.
 *
 * ```js
 * material.normalNode = normalMap( texture( normalTex ) );
 * ```
 *
 * @augments TempNode
 */
class NormalMapNode extends TempNode {
	static get type() {
		return 'NormalMapNode';
	}
	/**
	 * Constructs a new normal map node.
	 *
	 * @param {Node<vec3>} node - Represents the normal map data.
	 * @param {Node<vec2>?} [scaleNode=null] - Controls the intensity of the effect.
	 */
	constructor( node, scaleNode = null ) {
		super( 'vec3' );
		/**
		 * Represents the normal map data.
		 *
		 * @type {Node<vec3>}
		 */
		this.node = node;
		/**
		 * Controls the intensity of the effect.
		 *
		 * @type {Node<vec2>?}
		 * @default null
		 */
		this.scaleNode = scaleNode;
		/**
		 * The normal map type.
		 *
		 * @type {(TangentSpaceNormalMap|ObjectSpaceNormalMap)}
		 * @default TangentSpaceNormalMap
		 */
		this.normalMapType = TangentSpaceNormalMap;
	}
	/**
	 * Decodes the map from [0,1] into [-1,1], applies the optional scale and
	 * transforms the result into view space depending on the normal map type.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Node<vec3>?} The resulting normal in view space.
	 */
	setup( builder ) {
		const scale = this.scaleNode;
		// Decode the normal map sample from [ 0, 1 ] into [ - 1, 1 ].
		let mapValue = this.node.mul( 2.0 ).sub( 1.0 );
		if ( scale !== null ) {
			mapValue = vec3( mapValue.xy.mul( scale ), mapValue.z );
		}
		if ( this.normalMapType === ObjectSpaceNormalMap ) {
			return transformNormalToView( mapValue );
		}
		if ( this.normalMapType === TangentSpaceNormalMap ) {
			// Use the geometry's tangent frame when available, otherwise derive
			// a cotangent frame from screen-space derivatives.
			if ( builder.hasGeometryAttribute( 'tangent' ) === true ) {
				return TBNViewMatrix.mul( mapValue ).normalize();
			}
			return perturbNormal2Arb( {
				eye_pos: positionView,
				surf_norm: normalView,
				mapN: mapValue,
				uv: uv()
			} );
		}
		return null;
	}
}
/**
 * TSL function for creating a normal map node.
 *
 * @function
 * @param {Node<vec3>} node - Represents the normal map data.
 * @param {Node<vec2>?} [scaleNode=null] - Controls the intensity of the effect.
 * @returns {NormalMapNode}
 */
const normalMap = /*@__PURE__*/ nodeProxy( NormalMapNode );
/** @module BumpMapNode **/
// Bump Mapping Unparametrized Surfaces on the GPU by Morten S. Mikkelsen
// https://mmikk.github.io/papers3d/mm_sfgrad_bump.pdf
// Samples the bump texture at the current uv and at uv shifted by one screen-space
// step in x and y, returning both height differences scaled by `bumpScale`.
const dHdxy_fwd = Fn( ( { textureNode, bumpScale } ) => {
// It's used to preserve the same TextureNode instance
const sampleTexture = ( callback ) => textureNode.cache().context( { getUV: ( texNode ) => callback( texNode.uvNode || uv() ), forceUVContext: true } );
const Hll = float( sampleTexture( ( uvNode ) => uvNode ) );
return vec2(
float( sampleTexture( ( uvNode ) => uvNode.add( uvNode.dFdx() ) ) ).sub( Hll ),
float( sampleTexture( ( uvNode ) => uvNode.add( uvNode.dFdy() ) ) ).sub( Hll )
).mul( bumpScale );
} );
// Evaluate the derivative of the height w.r.t. screen-space using forward differencing (listing 2)
// Perturbs the surface normal by the surface gradient derived from the height derivatives `dHdxy`.
const perturbNormalArb = Fn( ( inputs ) => {
const { surf_pos, surf_norm, dHdxy } = inputs;
// normalize is done to ensure that the bump map looks the same regardless of the texture's scale
const vSigmaX = surf_pos.dFdx().normalize();
const vSigmaY = surf_pos.dFdy().normalize();
const vN = surf_norm; // normalized
const R1 = vSigmaY.cross( vN );
const R2 = vN.cross( vSigmaX );
// Determinant of the screen-space frame, multiplied by faceDirection
// (presumably ±1 for front/back faces — confirm with its definition).
const fDet = vSigmaX.dot( R1 ).mul( faceDirection );
const vGrad = fDet.sign().mul( dHdxy.x.mul( R1 ).add( dHdxy.y.mul( R2 ) ) );
return fDet.abs().mul( surf_norm ).sub( vGrad ).normalize();
} );
/**
 * This class can be used for applying bump maps to materials.
 *
 * ```js
 * material.normalNode = bumpMap( texture( bumpTex ) );
 * ```
 *
 * @augments TempNode
 */
class BumpMapNode extends TempNode {
	static get type() {
		return 'BumpMapNode';
	}
	/**
	 * Constructs a new bump map node.
	 *
	 * @param {Node<float>} textureNode - Represents the bump map data.
	 * @param {Node<float>?} [scaleNode=null] - Controls the intensity of the bump effect.
	 */
	constructor( textureNode, scaleNode = null ) {
		super( 'vec3' );
		/**
		 * Represents the bump map data.
		 *
		 * @type {Node<float>}
		 */
		this.textureNode = textureNode;
		/**
		 * Controls the intensity of the bump effect.
		 *
		 * @type {Node<float>?}
		 * @default null
		 */
		this.scaleNode = scaleNode;
	}
	/**
	 * Computes the screen-space height derivatives of the bump map and
	 * perturbs the view-space normal accordingly.
	 *
	 * @return {Node<vec3>} The perturbed normal in view space.
	 */
	setup() {
		let bumpScale = this.scaleNode;
		if ( bumpScale === null ) bumpScale = 1;
		const dHdxy = dHdxy_fwd( { textureNode: this.textureNode, bumpScale } );
		return perturbNormalArb( {
			surf_pos: positionView,
			surf_norm: normalView,
			dHdxy
		} );
	}
}
/**
 * TSL function for creating a bump map node.
 *
 * @function
 * @param {Node<float>} textureNode - Represents the bump map data.
 * @param {Node<float>?} [scaleNode=null] - Controls the intensity of the bump effect.
 * @returns {BumpMapNode}
 */
const bumpMap = /*@__PURE__*/ nodeProxy( BumpMapNode );
/** @module MaterialNode **/
// Module-level cache of material reference nodes, keyed by property name and
// shared across all MaterialNode instances (see MaterialNode#getCache()).
const _propertyCache = new Map();
/**
* This class should simplify the node access to material properties.
* It internally uses reference nodes to make sure changes to material
* properties are automatically reflected to predefined TSL objects
* like e.g. `materialColor`.
*
* @augments Node
*/
class MaterialNode extends Node {
static get type() {
return 'MaterialNode';
}
/**
 * Constructs a new material node.
 *
 * @param {String} scope - The scope defines what kind of material property is referred by the node.
 */
constructor( scope ) {
super();
/**
* The scope defines what material property is referred by the node.
*
* @type {String}
*/
this.scope = scope;
}
/**
 * Returns a cached reference node for the given property and type.
 *
 * @param {String} property - The name of the material property.
 * @param {String} type - The uniform type of the property.
 * @return {MaterialReferenceNode} A material reference node representing the property access.
 */
// NOTE(review): the cache key is the property name only — the type of the first
// request for a given property wins for all subsequent requests.
getCache( property, type ) {
let node = _propertyCache.get( property );
if ( node === undefined ) {
node = materialReference( property, type );
_propertyCache.set( property, node );
}
return node;
}
/**
 * Returns a float-typed material reference node for the given property name.
 *
 * @param {String} property - The name of the material property.
 * @return {MaterialReferenceNode<float>} A material reference node representing the property access.
 */
getFloat( property ) {
return this.getCache( property, 'float' );
}
/**
 * Returns a color-typed material reference node for the given property name.
 *
 * @param {String} property - The name of the material property.
 * @return {MaterialReferenceNode<color>} A material reference node representing the property access.
 */
getColor( property ) {
return this.getCache( property, 'color' );
}
/**
 * Returns a texture-typed material reference node for the given property name.
 * The `Map` suffix is appended automatically (e.g. 'normal' -> 'normalMap'),
 * except for the plain 'map' property.
 *
 * @param {String} property - The name of the material property.
 * @return {MaterialReferenceNode} A material reference node representing the property access.
 */
getTexture( property ) {
return this.getCache( property === 'map' ? 'map' : property + 'Map', 'texture' );
}
/**
 * The node setup is done depending on the selected scope. Multiple material properties
 * might be grouped into a single node composition if they logically belong together.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {Node} The node representing the selected scope.
 */
setup( builder ) {
const material = builder.context.material;
const scope = this.scope;
let node = null;
// Diffuse color: `color`, optionally multiplied with `map`.
if ( scope === MaterialNode.COLOR ) {
const colorNode = material.color !== undefined ? this.getColor( scope ) : vec3();
if ( material.map && material.map.isTexture === true ) {
node = colorNode.mul( this.getTexture( 'map' ) );
} else {
node = colorNode;
}
// Opacity: `opacity`, optionally multiplied with `alphaMap`.
} else if ( scope === MaterialNode.OPACITY ) {
const opacityNode = this.getFloat( scope );
if ( material.alphaMap && material.alphaMap.isTexture === true ) {
node = opacityNode.mul( this.getTexture( 'alpha' ) );
} else {
node = opacityNode;
}
// Specular strength: `specularMap.r`, or constant 1 when no map is set.
} else if ( scope === MaterialNode.SPECULAR_STRENGTH ) {
if ( material.specularMap && material.specularMap.isTexture === true ) {
node = this.getTexture( 'specular' ).r;
} else {
node = float( 1 );
}
} else if ( scope === MaterialNode.SPECULAR_INTENSITY ) {
const specularIntensityNode = this.getFloat( scope );
if ( material.specularIntensityMap && material.specularIntensityMap.isTexture === true ) {
node = specularIntensityNode.mul( this.getTexture( scope ).a );
} else {
node = specularIntensityNode;
}
} else if ( scope === MaterialNode.SPECULAR_COLOR ) {
const specularColorNode = this.getColor( scope );
if ( material.specularColorMap && material.specularColorMap.isTexture === true ) {
node = specularColorNode.mul( this.getTexture( scope ).rgb );
} else {
node = specularColorNode;
}
// Roughness/metalness sample the conventional map channels (g and b respectively).
} else if ( scope === MaterialNode.ROUGHNESS ) { // TODO: cleanup similar branches
const roughnessNode = this.getFloat( scope );
if ( material.roughnessMap && material.roughnessMap.isTexture === true ) {
node = roughnessNode.mul( this.getTexture( scope ).g );
} else {
node = roughnessNode;
}
} else if ( scope === MaterialNode.METALNESS ) {
const metalnessNode = this.getFloat( scope );
if ( material.metalnessMap && material.metalnessMap.isTexture === true ) {
node = metalnessNode.mul( this.getTexture( scope ).b );
} else {
node = metalnessNode;
}
// Emissive: `emissive` * `emissiveIntensity`, optionally multiplied with `emissiveMap`.
} else if ( scope === MaterialNode.EMISSIVE ) {
const emissiveIntensityNode = this.getFloat( 'emissiveIntensity' );
const emissiveNode = this.getColor( scope ).mul( emissiveIntensityNode );
if ( material.emissiveMap && material.emissiveMap.isTexture === true ) {
node = emissiveNode.mul( this.getTexture( scope ) );
} else {
node = emissiveNode;
}
// Normal: normal map takes precedence over bump map; falls back to the plain view-space normal.
} else if ( scope === MaterialNode.NORMAL ) {
if ( material.normalMap ) {
node = normalMap( this.getTexture( 'normal' ), this.getCache( 'normalScale', 'vec2' ) );
node.normalMapType = material.normalMapType;
} else if ( material.bumpMap ) {
node = bumpMap( this.getTexture( 'bump' ).r, this.getFloat( 'bumpScale' ) );
} else {
node = normalView;
}
} else if ( scope === MaterialNode.CLEARCOAT ) {
const clearcoatNode = this.getFloat( scope );
if ( material.clearcoatMap && material.clearcoatMap.isTexture === true ) {
node = clearcoatNode.mul( this.getTexture( scope ).r );
} else {
node = clearcoatNode;
}
} else if ( scope === MaterialNode.CLEARCOAT_ROUGHNESS ) {
const clearcoatRoughnessNode = this.getFloat( scope );
if ( material.clearcoatRoughnessMap && material.clearcoatRoughnessMap.isTexture === true ) {
node = clearcoatRoughnessNode.mul( this.getTexture( scope ).r );
} else {
node = clearcoatRoughnessNode;
}
} else if ( scope === MaterialNode.CLEARCOAT_NORMAL ) {
if ( material.clearcoatNormalMap ) {
node = normalMap( this.getTexture( scope ), this.getCache( scope + 'Scale', 'vec2' ) );
} else {
node = normalView;
}
} else if ( scope === MaterialNode.SHEEN ) {
const sheenNode = this.getColor( 'sheenColor' ).mul( this.getFloat( 'sheen' ) ); // Move this mul() to CPU
if ( material.sheenColorMap && material.sheenColorMap.isTexture === true ) {
node = sheenNode.mul( this.getTexture( 'sheenColor' ).rgb );
} else {
node = sheenNode;
}
} else if ( scope === MaterialNode.SHEEN_ROUGHNESS ) {
const sheenRoughnessNode = this.getFloat( scope );
if ( material.sheenRoughnessMap && material.sheenRoughnessMap.isTexture === true ) {
node = sheenRoughnessNode.mul( this.getTexture( scope ).a );
} else {
node = sheenRoughnessNode;
}
// Sheen roughness is clamped to a minimum of 0.07 regardless of the map.
node = node.clamp( 0.07, 1.0 );
// Anisotropy: decodes direction (rg) and strength (b) from the map and rotates
// the material's anisotropy vector accordingly.
} else if ( scope === MaterialNode.ANISOTROPY ) {
if ( material.anisotropyMap && material.anisotropyMap.isTexture === true ) {
const anisotropyPolar = this.getTexture( scope );
const anisotropyMat = mat2( materialAnisotropyVector.x, materialAnisotropyVector.y, materialAnisotropyVector.y.negate(), materialAnisotropyVector.x );
node = anisotropyMat.mul( anisotropyPolar.rg.mul( 2.0 ).sub( vec2( 1.0 ) ).normalize().mul( anisotropyPolar.b ) );
} else {
node = materialAnisotropyVector;
}
// Iridescence thickness: interpolates between iridescenceThicknessRange[ 0 ] and
// iridescenceThicknessRange[ 1 ] by the map's g channel when a map is set.
} else if ( scope === MaterialNode.IRIDESCENCE_THICKNESS ) {
const iridescenceThicknessMaximum = reference( '1', 'float', material.iridescenceThicknessRange );
if ( material.iridescenceThicknessMap ) {
const iridescenceThicknessMinimum = reference( '0', 'float', material.iridescenceThicknessRange );
node = iridescenceThicknessMaximum.sub( iridescenceThicknessMinimum ).mul( this.getTexture( scope ).g ).add( iridescenceThicknessMinimum );
} else {
node = iridescenceThicknessMaximum;
}
} else if ( scope === MaterialNode.TRANSMISSION ) {
const transmissionNode = this.getFloat( scope );
if ( material.transmissionMap ) {
node = transmissionNode.mul( this.getTexture( scope ).r );
} else {
node = transmissionNode;
}
} else if ( scope === MaterialNode.THICKNESS ) {
const thicknessNode = this.getFloat( scope );
if ( material.thicknessMap ) {
node = thicknessNode.mul( this.getTexture( scope ).g );
} else {
node = thicknessNode;
}
} else if ( scope === MaterialNode.IOR ) {
node = this.getFloat( scope );
} else if ( scope === MaterialNode.LIGHT_MAP ) {
node = this.getTexture( scope ).rgb.mul( this.getFloat( 'lightMapIntensity' ) );
// Ambient occlusion: ( aoMap.r - 1 ) * aoMapIntensity + 1.
} else if ( scope === MaterialNode.AO ) {
node = this.getTexture( scope ).r.sub( 1.0 ).mul( this.getFloat( 'aoMapIntensity' ) ).add( 1.0 );
} else {
// Fallback: plain reference to the property, typed by the node's output type.
const outputType = this.getNodeType( builder );
node = this.getCache( scope, outputType );
}
return node;
}
}
// Scope constants — each value names the material property (or property family)
// that a MaterialNode with that scope reads in setup().
MaterialNode.ALPHA_TEST = 'alphaTest';
MaterialNode.COLOR = 'color';
MaterialNode.OPACITY = 'opacity';
MaterialNode.SHININESS = 'shininess';
MaterialNode.SPECULAR = 'specular';
MaterialNode.SPECULAR_STRENGTH = 'specularStrength';
MaterialNode.SPECULAR_INTENSITY = 'specularIntensity';
MaterialNode.SPECULAR_COLOR = 'specularColor';
MaterialNode.REFLECTIVITY = 'reflectivity';
MaterialNode.ROUGHNESS = 'roughness';
MaterialNode.METALNESS = 'metalness';
MaterialNode.NORMAL = 'normal';
MaterialNode.CLEARCOAT = 'clearcoat';
MaterialNode.CLEARCOAT_ROUGHNESS = 'clearcoatRoughness';
MaterialNode.CLEARCOAT_NORMAL = 'clearcoatNormal';
MaterialNode.EMISSIVE = 'emissive';
MaterialNode.ROTATION = 'rotation';
MaterialNode.SHEEN = 'sheen';
MaterialNode.SHEEN_ROUGHNESS = 'sheenRoughness';
MaterialNode.ANISOTROPY = 'anisotropy';
MaterialNode.IRIDESCENCE = 'iridescence';
MaterialNode.IRIDESCENCE_IOR = 'iridescenceIOR';
MaterialNode.IRIDESCENCE_THICKNESS = 'iridescenceThickness';
MaterialNode.IOR = 'ior';
MaterialNode.TRANSMISSION = 'transmission';
MaterialNode.THICKNESS = 'thickness';
MaterialNode.ATTENUATION_DISTANCE = 'attenuationDistance';
MaterialNode.ATTENUATION_COLOR = 'attenuationColor';
MaterialNode.LINE_SCALE = 'scale';
MaterialNode.LINE_DASH_SIZE = 'dashSize';
MaterialNode.LINE_GAP_SIZE = 'gapSize';
MaterialNode.LINE_WIDTH = 'linewidth';
MaterialNode.LINE_DASH_OFFSET = 'dashOffset';
MaterialNode.POINT_SIZE = 'size';
MaterialNode.DISPERSION = 'dispersion';
MaterialNode.LIGHT_MAP = 'light';
MaterialNode.AO = 'ao';
/**
 * TSL object that represents alpha test of the current material.
 *
 * @type {Node<float>}
 */
const materialAlphaTest = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.ALPHA_TEST );
/**
 * TSL object that represents the diffuse color of the current material.
 * The value is composed via `color` * `map`.
 *
 * @type {Node<vec3>}
 */
const materialColor = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.COLOR );
/**
 * TSL object that represents the shininess of the current material.
 *
 * @type {Node<float>}
 */
const materialShininess = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.SHININESS );
/**
 * TSL object that represents the emissive color of the current material.
 * The value is composed via `emissive` * `emissiveIntensity` * `emissiveMap`.
 *
 * @type {Node<vec3>}
 */
const materialEmissive = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.EMISSIVE );
/**
 * TSL object that represents the opacity of the current material.
 * The value is composed via `opacity` * `alphaMap`.
 *
 * @type {Node<float>}
 */
const materialOpacity = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.OPACITY );
/**
 * TSL object that represents the specular of the current material.
 *
 * @type {Node<vec3>}
 */
const materialSpecular = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.SPECULAR );
/**
 * TSL object that represents the specular intensity of the current material.
 * The value is composed via `specularIntensity` * `specularMap.a`.
 *
 * @type {Node<float>}
 */
const materialSpecularIntensity = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.SPECULAR_INTENSITY );
/**
 * TSL object that represents the specular color of the current material.
 * The value is composed via `specularColor` * `specularMap.rgb`.
 *
 * @type {Node<vec3>}
 */
const materialSpecularColor = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.SPECULAR_COLOR );
/**
 * TSL object that represents the specular strength of the current material.
 * The value is composed via `specularMap.r`.
 *
 * @type {Node<float>}
 */
const materialSpecularStrength = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.SPECULAR_STRENGTH );
/**
 * TSL object that represents the reflectivity of the current material.
 *
 * @type {Node<float>}
 */
const materialReflectivity = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.REFLECTIVITY );
/**
 * TSL object that represents the roughness of the current material.
 * The value is composed via `roughness` * `roughnessMap.g`.
 *
 * @type {Node<float>}
 */
const materialRoughness = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.ROUGHNESS );
/**
 * TSL object that represents the metalness of the current material.
 * The value is composed via `metalness` * `metalnessMap.b`.
 *
 * @type {Node<float>}
 */
const materialMetalness = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.METALNESS );
/**
 * TSL object that represents the normal of the current material.
 * The value will be either `normalMap` * `normalScale`, `bumpMap` * `bumpScale` or `normalView`.
 *
 * @type {Node<vec3>}
 */
const materialNormal = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.NORMAL );
/**
 * TSL object that represents the clearcoat of the current material.
 * The value is composed via `clearcoat` * `clearcoatMap.r`.
 *
 * @type {Node<float>}
 */
const materialClearcoat = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.CLEARCOAT );
/**
 * TSL object that represents the clearcoat roughness of the current material.
 * The value is composed via `clearcoatRoughness` * `clearcoatRoughnessMap.r`.
 *
 * @type {Node<float>}
 */
const materialClearcoatRoughness = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.CLEARCOAT_ROUGHNESS );
/**
 * TSL object that represents the clearcoat normal of the current material.
 * The value will be either `clearcoatNormalMap` or `normalView`.
 *
 * @type {Node<vec3>}
 */
const materialClearcoatNormal = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.CLEARCOAT_NORMAL );
/**
 * TSL object that represents the rotation of the current sprite material.
 *
 * @type {Node<float>}
 */
const materialRotation = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.ROTATION );
/**
 * TSL object that represents the sheen color of the current material.
 * The value is composed via `sheen` * `sheenColor` * `sheenColorMap`.
 *
 * @type {Node<vec3>}
 */
const materialSheen = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.SHEEN );
/**
 * TSL object that represents the sheen roughness of the current material.
 * The value is composed via `sheenRoughness` * `sheenRoughnessMap.a`.
 *
 * @type {Node<float>}
 */
const materialSheenRoughness = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.SHEEN_ROUGHNESS );
/**
 * TSL object that represents the anisotropy of the current material.
 *
 * @type {Node<vec2>}
 */
const materialAnisotropy = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.ANISOTROPY );
/**
 * TSL object that represents the iridescence of the current material.
 *
 * @type {Node<float>}
 */
const materialIridescence = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.IRIDESCENCE );
/**
 * TSL object that represents the iridescence IOR of the current material.
 *
 * @type {Node<float>}
 */
const materialIridescenceIOR = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.IRIDESCENCE_IOR );
/**
 * TSL object that represents the iridescence thickness of the current material.
 *
 * @type {Node<float>}
 */
const materialIridescenceThickness = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.IRIDESCENCE_THICKNESS );
/**
 * TSL object that represents the transmission of the current material.
 * The value is composed via `transmission` * `transmissionMap.r`.
 *
 * @type {Node<float>}
 */
const materialTransmission = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.TRANSMISSION );
/**
 * TSL object that represents the thickness of the current material.
 * The value is composed via `thickness` * `thicknessMap.g`.
 *
 * @type {Node<float>}
 */
const materialThickness = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.THICKNESS );
/**
 * TSL object that represents the IOR of the current material.
 *
 * @type {Node<float>}
 */
const materialIOR = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.IOR );
/**
 * TSL object that represents the attenuation distance of the current material.
 *
 * @type {Node<float>}
 */
const materialAttenuationDistance = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.ATTENUATION_DISTANCE );
/**
 * TSL object that represents the attenuation color of the current material.
 *
 * @type {Node<vec3>}
 */
const materialAttenuationColor = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.ATTENUATION_COLOR );
/**
 * TSL object that represents the scale of the current dashed line material.
 *
 * @type {Node<float>}
 */
const materialLineScale = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.LINE_SCALE );
/**
 * TSL object that represents the dash size of the current dashed line material.
 *
 * @type {Node<float>}
 */
const materialLineDashSize = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.LINE_DASH_SIZE );
/**
 * TSL object that represents the gap size of the current dashed line material.
 *
 * @type {Node<float>}
 */
const materialLineGapSize = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.LINE_GAP_SIZE );
/**
 * TSL object that represents the line width of the current line material.
 *
 * @type {Node<float>}
 */
const materialLineWidth = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.LINE_WIDTH );
/**
 * TSL object that represents the dash offset of the current line material.
 *
 * @type {Node<float>}
 */
const materialLineDashOffset = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.LINE_DASH_OFFSET );
/**
 * TSL object that represents the point size of the current points material.
 *
 * @type {Node<float>}
 */
const materialPointSize = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.POINT_SIZE );
/**
 * TSL object that represents the dispersion of the current material.
 *
 * @type {Node<float>}
 */
const materialDispersion = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.DISPERSION );
/**
 * TSL object that represents the light map of the current material.
 * The value is composed via `lightMapIntensity` * `lightMap.rgb`.
 *
 * @type {Node<vec3>}
 */
const materialLightMap = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.LIGHT_MAP );
/**
 * TSL object that represents the ambient occlusion map of the current material.
 * The value is composed via `( aoMap.r - 1 ) * aoMapIntensity + 1`.
 *
 * @type {Node<float>}
 */
const materialAO = /*@__PURE__*/ nodeImmutable( MaterialNode, MaterialNode.AO );
/**
 * TSL object that represents the anisotropy vector of the current material.
 *
 * @type {Node<vec2>}
 */
// CPU-updated uniform: encodes the anisotropy strength and rotation as a 2D
// direction vector of length `anisotropy`, refreshed once per render per material.
const materialAnisotropyVector = /*@__PURE__*/ uniform( new Vector2() ).onReference( function ( frame ) {
return frame.material;
} ).onRenderUpdate( function ( { material } ) {
this.value.set( material.anisotropy * Math.cos( material.anisotropyRotation ), material.anisotropy * Math.sin( material.anisotropyRotation ) );
} );
/** @module ModelViewProjectionNode **/
/**
 * TSL object that represents the position in clip space after the model-view-projection transform of the current rendered object.
 *
 * @type {VaryingNode<vec4>}
 */
// The actual projection is delegated to the active render context via
// setupModelViewProjection() and passed to the fragment stage as 'v_modelViewProjection'.
const modelViewProjection = /*@__PURE__*/ ( Fn( ( builder ) => {
return builder.context.setupModelViewProjection();
}, 'vec4' ).once() )().toVarying( 'v_modelViewProjection' );
/** @module IndexNode **/
/**
 * This class represents shader indices of different types. The following predefined node
 * objects cover frequent use cases:
 *
 * - `vertexIndex`: The index of a vertex within a mesh.
 * - `instanceIndex`: The index of either a mesh instance or an invocation of a compute shader.
 * - `drawIndex`: The index of a draw call.
 * - `invocationLocalIndex`: The index of a compute invocation within the scope of a workgroup.
 * - `invocationSubgroupIndex`: The index of a compute invocation within the scope of a subgroup.
 * - `subgroupIndex`: The index of the subgroup the current compute invocation belongs to.
 *
 * @augments Node
 */
class IndexNode extends Node {

	static get type() {

		return 'IndexNode';

	}

	/**
	 * Constructs a new index node.
	 *
	 * @param {('vertex'|'instance'|'subgroup'|'invocationLocal'|'invocationSubgroup'|'draw')} scope - The scope of the index node.
	 */
	constructor( scope ) {

		super( 'uint' );

		/**
		 * The scope of the index node.
		 *
		 * @type {String}
		 */
		this.scope = scope;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isIndexNode = true;

	}

	/**
	 * Generates the shader snippet that resolves the index value for this node's scope.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The generated snippet.
	 */
	generate( builder ) {

		const nodeType = this.getNodeType( builder );
		const { scope } = this;

		let propertyName;

		switch ( scope ) {

			case IndexNode.VERTEX:
				propertyName = builder.getVertexIndex();
				break;

			case IndexNode.INSTANCE:
				propertyName = builder.getInstanceIndex();
				break;

			case IndexNode.DRAW:
				propertyName = builder.getDrawIndex();
				break;

			case IndexNode.INVOCATION_LOCAL:
				propertyName = builder.getInvocationLocalIndex();
				break;

			case IndexNode.INVOCATION_SUBGROUP:
				propertyName = builder.getInvocationSubgroupIndex();
				break;

			case IndexNode.SUBGROUP:
				propertyName = builder.getSubgroupIndex();
				break;

			default:
				throw new Error( 'THREE.IndexNode: Unknown scope: ' + scope );

		}

		// Built-in index inputs are only available in the vertex and compute
		// stages; any other stage reads the value through a varying.

		if ( builder.shaderStage === 'vertex' || builder.shaderStage === 'compute' ) {

			return propertyName;

		}

		return varying( this ).build( builder, nodeType );

	}

}

IndexNode.VERTEX = 'vertex';
IndexNode.INSTANCE = 'instance';
IndexNode.SUBGROUP = 'subgroup';
IndexNode.INVOCATION_LOCAL = 'invocationLocal';
IndexNode.INVOCATION_SUBGROUP = 'invocationSubgroup';
IndexNode.DRAW = 'draw';
/**
* TSL object that represents the index of a vertex within a mesh.
*
* @type {IndexNode}
*/
const vertexIndex = /*@__PURE__*/ nodeImmutable( IndexNode, IndexNode.VERTEX );
/**
* TSL object that represents the index of either a mesh instance or an invocation of a compute shader.
*
* @type {IndexNode}
*/
const instanceIndex = /*@__PURE__*/ nodeImmutable( IndexNode, IndexNode.INSTANCE );
/**
* TSL object that represents the index of the subgroup the current compute invocation belongs to.
*
* @type {IndexNode}
*/
const subgroupIndex = /*@__PURE__*/ nodeImmutable( IndexNode, IndexNode.SUBGROUP );
/**
* TSL object that represents the index of a compute invocation within the scope of a subgroup.
*
* @type {IndexNode}
*/
const invocationSubgroupIndex = /*@__PURE__*/ nodeImmutable( IndexNode, IndexNode.INVOCATION_SUBGROUP );
/**
* TSL object that represents the index of a compute invocation within the scope of a workgroup.
*
* @type {IndexNode}
*/
const invocationLocalIndex = /*@__PURE__*/ nodeImmutable( IndexNode, IndexNode.INVOCATION_LOCAL );
/**
* TSL object that represents the index of a draw call.
*
* @type {IndexNode}
*/
const drawIndex = /*@__PURE__*/ nodeImmutable( IndexNode, IndexNode.DRAW );
/** @module InstanceNode **/
/**
* This node implements the vertex shader logic which is required
* when rendering 3D objects via instancing. The code makes sure
* vertex positions, normals and colors can be modified via instanced
* data.
*
* @augments Node
*/
class InstanceNode extends Node {
static get type() {
return 'InstanceNode';
}
/**
* Constructs a new instance node.
*
* @param {Number} count - The number of instances.
* @param {InstancedBufferAttribute} instanceMatrix - Instanced buffer attribute representing the instance transformations.
* @param {InstancedBufferAttribute} instanceColor - Instanced buffer attribute representing the instance colors.
*/
constructor( count, instanceMatrix, instanceColor ) {
super( 'void' );
/**
* The number of instances.
*
* @type {Number}
*/
this.count = count;
/**
* Instanced buffer attribute representing the transformation of instances.
*
* @type {InstancedBufferAttribute}
*/
this.instanceMatrix = instanceMatrix;
/**
* Instanced buffer attribute representing the color of instances.
*
* @type {InstancedBufferAttribute}
*/
this.instanceColor = instanceColor;
/**
* The node that represents the instance matrix data.
* Created lazily in `setup()`.
*
* @type {Node}
*/
this.instanceMatrixNode = null;
/**
* The node that represents the instance color data.
* Created lazily in `setup()`.
*
* @type {Node}
*/
this.instanceColorNode = null;
/**
* The update type is set to `frame` since an update
* of instanced buffer data must be checked per frame.
*
* @type {String}
* @default 'frame'
*/
this.updateType = NodeUpdateType.FRAME;
/**
* A reference to a buffer that is used by `instanceMatrixNode`.
* Only set when the attribute-based (large count) code path is taken.
*
* @type {InstancedInterleavedBuffer}
*/
this.buffer = null;
/**
* A reference to a buffer that is used by `instanceColorNode`.
*
* @type {InstancedBufferAttribute}
*/
this.bufferColor = null;
}
/**
* Sets up the internal buffers and nodes and assigns the transformed vertex data
* to predefined node variables for accumulation. That follows the same patterns
* like with morph and skinning nodes.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setup( builder ) {
const { count, instanceMatrix, instanceColor } = this;
let { instanceMatrixNode, instanceColorNode } = this;
if ( instanceMatrixNode === null ) {
// Both WebGPU and WebGL backends have UBO max limited to 64kb. Matrix count number bigger than 1000 ( 16 * 4 * 1000 = 64kb ) will fallback to attribute.
if ( count <= 1000 ) {
instanceMatrixNode = buffer( instanceMatrix.array, 'mat4', Math.max( count, 1 ) ).element( instanceIndex );
} else {
// One mat4 is read as four interleaved vec4 attributes (stride 16 floats).
const buffer = new InstancedInterleavedBuffer( instanceMatrix.array, 16, 1 );
this.buffer = buffer;
const bufferFn = instanceMatrix.usage === DynamicDrawUsage ? instancedDynamicBufferAttribute : instancedBufferAttribute;
const instanceBuffers = [
// F.Signature -> bufferAttribute( array, type, stride, offset )
bufferFn( buffer, 'vec4', 16, 0 ),
bufferFn( buffer, 'vec4', 16, 4 ),
bufferFn( buffer, 'vec4', 16, 8 ),
bufferFn( buffer, 'vec4', 16, 12 )
];
instanceMatrixNode = mat4( ...instanceBuffers );
}
this.instanceMatrixNode = instanceMatrixNode;
}
// Lazily create the color node, only when instance colors are provided.
if ( instanceColor && instanceColorNode === null ) {
const buffer = new InstancedBufferAttribute( instanceColor.array, 3 );
const bufferFn = instanceColor.usage === DynamicDrawUsage ? instancedDynamicBufferAttribute : instancedBufferAttribute;
this.bufferColor = buffer;
instanceColorNode = vec3( bufferFn( buffer, 'vec3', 3, 0 ) );
this.instanceColorNode = instanceColorNode;
}
// POSITION
const instancePosition = instanceMatrixNode.mul( positionLocal ).xyz;
positionLocal.assign( instancePosition );
// NORMAL
if ( builder.hasGeometryAttribute( 'normal' ) ) {
const instanceNormal = transformNormal( normalLocal, instanceMatrixNode );
// ASSIGNS
normalLocal.assign( instanceNormal );
}
// COLOR
if ( this.instanceColorNode !== null ) {
varyingProperty( 'vec3', 'vInstanceColor' ).assign( this.instanceColorNode );
}
}
/**
* Checks if the internal buffers require an update. For non-dynamic usage,
* the source attribute's version is forwarded to the internal buffer so the
* backend detects the change. Dynamic buffers are excluded here — presumably
* they are re-uploaded every frame elsewhere (TODO confirm).
*
* @param {NodeFrame} frame - The current node frame (unused).
*/
update( /*frame*/ ) {
if ( this.instanceMatrix.usage !== DynamicDrawUsage && this.buffer !== null && this.instanceMatrix.version !== this.buffer.version ) {
this.buffer.version = this.instanceMatrix.version;
}
if ( this.instanceColor && this.instanceColor.usage !== DynamicDrawUsage && this.bufferColor !== null && this.instanceColor.version !== this.bufferColor.version ) {
this.bufferColor.version = this.instanceColor.version;
}
}
}
/**
* TSL function for creating an instance node.
*
* @function
* @param {Number} count - The number of instances.
* @param {InstancedBufferAttribute} instanceMatrix - Instanced buffer attribute representing the instance transformations.
* @param {InstancedBufferAttribute} instanceColor - Instanced buffer attribute representing the instance colors.
* @returns {InstanceNode}
*/
const instance = /*@__PURE__*/ nodeProxy( InstanceNode );
/** @module InstancedMeshNode **/
/**
 * This is a special version of `InstanceNode` which requires the usage of {@link InstancedMesh}.
 * It allows an easier setup of the instance node.
 *
 * @augments module:InstanceNode~InstanceNode
 */
class InstancedMeshNode extends InstanceNode {

	static get type() {

		return 'InstancedMeshNode';

	}

	/**
	 * Constructs a new instanced mesh node.
	 *
	 * @param {InstancedMesh} instancedMesh - The instanced mesh.
	 */
	constructor( instancedMesh ) {

		super( instancedMesh.count, instancedMesh.instanceMatrix, instancedMesh.instanceColor );

		/**
		 * A reference to the instanced mesh.
		 *
		 * @type {InstancedMesh}
		 */
		this.instancedMesh = instancedMesh;

	}

}

/**
 * TSL function for creating an instanced mesh node.
 *
 * @function
 * @param {InstancedMesh} instancedMesh - The instanced mesh.
 * @returns {InstancedMeshNode}
 */
const instancedMesh = /*@__PURE__*/ nodeProxy( InstancedMeshNode );
/** @module BatchNode **/
/**
* This node implements the vertex shader logic which is required
* when rendering 3D objects via batching. `BatchNode` must be used
* with instances of {@link BatchedMesh}.
*
* @augments Node
*/
class BatchNode extends Node {
static get type() {
return 'BatchNode';
}
/**
* Constructs a new batch node.
*
* @param {BatchedMesh} batchMesh - A reference to batched mesh.
*/
constructor( batchMesh ) {
super( 'void' );
/**
* A reference to batched mesh.
*
* @type {BatchedMesh}
*/
this.batchMesh = batchMesh;
/**
* The batching index node. Resolved lazily in `setup()` to either
* `drawIndex` or, as a fallback, `instanceIndex`.
*
* @type {IndexNode?}
* @default null
*/
this.batchingIdNode = null;
}
/**
* Sets up the internal buffers and nodes and assigns the transformed vertex data
* to predefined node variables for accumulation. That follows the same patterns
* like with morph and skinning nodes.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setup( builder ) {
// Prefer the draw index as batching id; fall back to the instance index
// when the backend does not expose a draw index.
if ( this.batchingIdNode === null ) {
if ( builder.getDrawIndex() === null ) {
this.batchingIdNode = instanceIndex;
} else {
this.batchingIdNode = drawIndex;
}
}
// Resolves the geometry index for a given batching id by reading the
// batched mesh's indirect texture (one value per id, row-major layout).
const getIndirectIndex = Fn( ( [ id ] ) => {
const size = int( textureSize( textureLoad( this.batchMesh._indirectTexture ), 0 ) );
const x = int( id ).modInt( size );
const y = int( id ).div( size );
return textureLoad( this.batchMesh._indirectTexture, ivec2( x, y ) ).x;
} ).setLayout( {
name: 'getIndirectIndex',
type: 'uint',
inputs: [
{ name: 'id', type: 'int' }
]
} );
const indirectId = getIndirectIndex( int( this.batchingIdNode ) );
// Each matrix occupies four consecutive RGBA texels in the matrices texture,
// hence the id is scaled by 4 before being converted to texel coordinates.
const matricesTexture = this.batchMesh._matricesTexture;
const size = textureSize( textureLoad( matricesTexture ), 0 );
const j = float( indirectId ).mul( 4 ).toInt().toVar();
const x = j.modInt( size );
const y = j.div( int( size ) );
const batchingMatrix = mat4(
textureLoad( matricesTexture, ivec2( x, y ) ),
textureLoad( matricesTexture, ivec2( x.add( 1 ), y ) ),
textureLoad( matricesTexture, ivec2( x.add( 2 ), y ) ),
textureLoad( matricesTexture, ivec2( x.add( 3 ), y ) )
);
// Optional per-geometry colors: one RGBA texel per id, forwarded to the
// fragment stage via the `vBatchColor` varying.
const colorsTexture = this.batchMesh._colorsTexture;
if ( colorsTexture !== null ) {
const getBatchingColor = Fn( ( [ id ] ) => {
const size = textureSize( textureLoad( colorsTexture ), 0 ).x;
const j = id;
const x = j.modInt( size );
const y = j.div( size );
return textureLoad( colorsTexture, ivec2( x, y ) ).rgb;
} ).setLayout( {
name: 'getBatchingColor',
type: 'vec3',
inputs: [
{ name: 'id', type: 'int' }
]
} );
const color = getBatchingColor( indirectId );
varyingProperty( 'vec3', 'vBatchColor' ).assign( color );
}
const bm = mat3( batchingMatrix );
positionLocal.assign( batchingMatrix.mul( positionLocal ) );
// Normal transform: the normal is divided by the squared lengths of the
// upper-3x3 columns before applying the matrix, which compensates for
// (non-uniform) scaling similar to an inverse-transpose transform.
const transformedNormal = normalLocal.div( vec3( bm[ 0 ].dot( bm[ 0 ] ), bm[ 1 ].dot( bm[ 1 ] ), bm[ 2 ].dot( bm[ 2 ] ) ) );
const batchingNormal = bm.mul( transformedNormal ).xyz;
normalLocal.assign( batchingNormal );
if ( builder.hasGeometryAttribute( 'tangent' ) ) {
tangentLocal.mulAssign( bm );
}
}
}
/**
* TSL function for creating a batch node.
*
* @function
* @param {BatchedMesh} batchMesh - A reference to batched mesh.
* @returns {BatchNode}
*/
const batch = /*@__PURE__*/ nodeProxy( BatchNode );
/** @module SkinningNode **/
// Tracks the last frame id a skeleton was updated in, so a skeleton shared
// by multiple skinned meshes is only updated once per frame (see `update()`).
const _frameId = new WeakMap();
/**
* This node implements the vertex transformation shader logic which is required
* for skinning/skeletal animation.
*
* @augments Node
*/
class SkinningNode extends Node {
static get type() {
return 'SkinningNode';
}
/**
* Constructs a new skinning node.
*
* @param {SkinnedMesh} skinnedMesh - The skinned mesh.
* @param {Boolean} [useReference=false] - Whether to use reference nodes for internal skinned mesh related data or not.
*/
constructor( skinnedMesh, useReference = false ) {
super( 'void' );
/**
* The skinned mesh.
*
* @type {SkinnedMesh}
*/
this.skinnedMesh = skinnedMesh;
/**
* Whether to use reference nodes for internal skinned mesh related data or not.
* When `true`, bind matrices and bone matrices are resolved via reference
* nodes against the object of the current frame (see `update()`), instead of
* being bound to the mesh passed to the constructor.
*
* @type {Boolean}
*/
this.useReference = useReference;
/**
* The update type overwritten since skinning nodes are updated per object.
*
* @type {String}
*/
this.updateType = NodeUpdateType.OBJECT;
//
/**
* The skin index attribute.
*
* @type {AttributeNode}
*/
this.skinIndexNode = attribute( 'skinIndex', 'uvec4' );
/**
* The skin weight attribute.
*
* @type {AttributeNode}
*/
this.skinWeightNode = attribute( 'skinWeight', 'vec4' );
let bindMatrixNode, bindMatrixInverseNode, boneMatricesNode;
if ( useReference ) {
bindMatrixNode = reference( 'bindMatrix', 'mat4' );
bindMatrixInverseNode = reference( 'bindMatrixInverse', 'mat4' );
boneMatricesNode = referenceBuffer( 'skeleton.boneMatrices', 'mat4', skinnedMesh.skeleton.bones.length );
} else {
bindMatrixNode = uniform( skinnedMesh.bindMatrix, 'mat4' );
bindMatrixInverseNode = uniform( skinnedMesh.bindMatrixInverse, 'mat4' );
boneMatricesNode = buffer( skinnedMesh.skeleton.boneMatrices, 'mat4', skinnedMesh.skeleton.bones.length );
}
/**
* The bind matrix node.
*
* @type {Node<mat4>}
*/
this.bindMatrixNode = bindMatrixNode;
/**
* The bind matrix inverse node.
*
* @type {Node<mat4>}
*/
this.bindMatrixInverseNode = bindMatrixInverseNode;
/**
* The bind matrices as a uniform buffer node.
*
* @type {Node}
*/
this.boneMatricesNode = boneMatricesNode;
/**
* The previous bind matrices as a uniform buffer node.
* Required for computing motion vectors.
*
* @type {Node?}
* @default null
*/
this.previousBoneMatricesNode = null;
}
/**
* Transforms the given vertex position via skinning: the position is moved
* into bind space, blended by the four weighted bone matrices, and moved
* back via the inverse bind matrix.
*
* @param {Node} [boneMatrices=this.boneMatricesNode] - The bone matrices
* @param {Node<vec3>} [position=positionLocal] - The vertex position in local space.
* @return {Node<vec3>} The transformed vertex position.
*/
getSkinnedPosition( boneMatrices = this.boneMatricesNode, position = positionLocal ) {
const { skinIndexNode, skinWeightNode, bindMatrixNode, bindMatrixInverseNode } = this;
const boneMatX = boneMatrices.element( skinIndexNode.x );
const boneMatY = boneMatrices.element( skinIndexNode.y );
const boneMatZ = boneMatrices.element( skinIndexNode.z );
const boneMatW = boneMatrices.element( skinIndexNode.w );
// POSITION
const skinVertex = bindMatrixNode.mul( position );
const skinned = add(
boneMatX.mul( skinWeightNode.x ).mul( skinVertex ),
boneMatY.mul( skinWeightNode.y ).mul( skinVertex ),
boneMatZ.mul( skinWeightNode.z ).mul( skinVertex ),
boneMatW.mul( skinWeightNode.w ).mul( skinVertex )
);
return bindMatrixInverseNode.mul( skinned ).xyz;
}
/**
* Transforms the given vertex normal via skinning. The weighted sum of the
* four bone matrices is wrapped with the bind/inverse-bind matrices and the
* normal is transformed as a direction.
*
* @param {Node} [boneMatrices=this.boneMatricesNode] - The bone matrices
* @param {Node<vec3>} [normal=normalLocal] - The vertex normal in local space.
* @return {Node<vec3>} The transformed vertex normal.
*/
getSkinnedNormal( boneMatrices = this.boneMatricesNode, normal = normalLocal ) {
const { skinIndexNode, skinWeightNode, bindMatrixNode, bindMatrixInverseNode } = this;
const boneMatX = boneMatrices.element( skinIndexNode.x );
const boneMatY = boneMatrices.element( skinIndexNode.y );
const boneMatZ = boneMatrices.element( skinIndexNode.z );
const boneMatW = boneMatrices.element( skinIndexNode.w );
// NORMAL
let skinMatrix = add(
skinWeightNode.x.mul( boneMatX ),
skinWeightNode.y.mul( boneMatY ),
skinWeightNode.z.mul( boneMatZ ),
skinWeightNode.w.mul( boneMatW )
);
skinMatrix = bindMatrixInverseNode.mul( skinMatrix ).mul( bindMatrixNode );
return skinMatrix.transformDirection( normal ).xyz;
}
/**
* Computes the skinned vertex position of the previous frame, using the
* previous frame's bone matrices. The previous matrices buffer is created
* lazily on first use.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec3>} The skinned position from the previous frame.
*/
getPreviousSkinnedPosition( builder ) {
const skinnedMesh = builder.object;
if ( this.previousBoneMatricesNode === null ) {
skinnedMesh.skeleton.previousBoneMatrices = new Float32Array( skinnedMesh.skeleton.boneMatrices );
this.previousBoneMatricesNode = referenceBuffer( 'skeleton.previousBoneMatrices', 'mat4', skinnedMesh.skeleton.bones.length );
}
return this.getSkinnedPosition( this.previousBoneMatricesNode, positionPrevious );
}
/**
* Returns `true` if bone matrices from the previous frame are required.
* That is the case when velocity/motion-vector output is requested, either
* via the renderer's MRT configuration or per object.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Boolean} Whether bone matrices from the previous frame are required or not.
*/
needsPreviousBoneMatrices( builder ) {
const mrt = builder.renderer.getMRT();
return ( mrt && mrt.has( 'velocity' ) ) || getDataFromObject( builder.object ).useVelocity === true;
}
/**
* Sets up the skinning node by assigning the transformed vertex data to predefined node variables.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setup( builder ) {
if ( this.needsPreviousBoneMatrices( builder ) ) {
positionPrevious.assign( this.getPreviousSkinnedPosition( builder ) );
}
const skinPosition = this.getSkinnedPosition();
positionLocal.assign( skinPosition );
if ( builder.hasGeometryAttribute( 'normal' ) ) {
const skinNormal = this.getSkinnedNormal();
normalLocal.assign( skinNormal );
if ( builder.hasGeometryAttribute( 'tangent' ) ) {
// NOTE(review): the tangent is overwritten with the skinned *normal*
// here rather than a skinned tangent — looks suspicious; confirm
// against upstream three.js before changing.
tangentLocal.assign( skinNormal );
}
}
}
/**
* Generates the code snippet of the skinning node.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {String} output - The current output.
* @return {String} The generated code snippet.
*/
generate( builder, output ) {
if ( output !== 'void' ) {
return positionLocal.build( builder, output );
}
}
/**
* Updates the state of the skinned mesh by updating the skeleton once per frame.
* Before the update, the current bone matrices are copied into the previous
* matrices buffer when motion vectors are in use.
*
* @param {NodeFrame} frame - The current node frame.
*/
update( frame ) {
const object = this.useReference ? frame.object : this.skinnedMesh;
const skeleton = object.skeleton;
if ( _frameId.get( skeleton ) === frame.frameId ) return;
_frameId.set( skeleton, frame.frameId );
if ( this.previousBoneMatricesNode !== null ) skeleton.previousBoneMatrices.set( skeleton.boneMatrices );
skeleton.update();
}
}
/**
* TSL function for creating a skinning node.
*
* @function
* @param {SkinnedMesh} skinnedMesh - The skinned mesh.
* @returns {SkinningNode}
*/
const skinning = ( skinnedMesh ) => nodeObject( new SkinningNode( skinnedMesh ) );
/**
* TSL function for creating a skinning node with reference usage.
*
* @function
* @param {SkinnedMesh} skinnedMesh - The skinned mesh.
* @returns {SkinningNode}
*/
const skinningReference = ( skinnedMesh ) => nodeObject( new SkinningNode( skinnedMesh, true ) );
/** @module LoopNode **/
/**
* This module offers a variety of ways to implement loops in TSL. In its basic form it's:
* ```js
* Loop( count, ( { i } ) => {
*
* } );
* ```
* However, it is also possible to define a start and end ranges, data types and loop conditions:
* ```js
* Loop( { start: int( 0 ), end: int( 10 ), type: 'int', condition: '<' }, ( { i } ) => {
*
* } );
*```
* Nested loops can be defined in a compacted form:
* ```js
* Loop( 10, 5, ( { i, j } ) => {
*
* } );
* ```
* Loops that should run backwards can be defined like so:
* ```js
* Loop( { start: 10 }, () => {} );
* ```
* The module also provides `Break()` and `Continue()` TSL expression for loop control.
* @augments Node
*/
class LoopNode extends Node {
static get type() {
return 'LoopNode';
}
/**
* Constructs a new loop node.
*
* @param {Array<Any>} params - Depending on the loop type, array holds different parameterization values for the loop. The last entry is always the loop body callback.
*/
constructor( params = [] ) {
super();
this.params = params;
}
/**
* Returns a loop variable name based on an index. The pattern is
* `0` = `i`, `1`= `j`, `2`= `k` and so on.
*
* @param {Number} index - The index.
* @return {String} The loop variable name.
*/
getVarName( index ) {
return String.fromCharCode( 'i'.charCodeAt( 0 ) + index );
}
/**
* Returns properties about this node. On first access, the loop body callback
* (the last entry in `params`) is invoked with expression nodes for the loop
* variables, and the resulting stack and return node are cached.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Object} The node properties.
*/
getProperties( builder ) {
const properties = builder.getNodeProperties( this );
if ( properties.stackNode !== undefined ) return properties;
//
const inputs = {};
for ( let i = 0, l = this.params.length - 1; i < l; i ++ ) {
const param = this.params[ i ];
const name = ( param.isNode !== true && param.name ) || this.getVarName( i );
const type = ( param.isNode !== true && param.type ) || 'int';
inputs[ name ] = expression( name, type );
}
const stack = builder.addStack(); // TODO: cache() it
properties.returnsNode = this.params[ this.params.length - 1 ]( inputs, stack, builder );
properties.stackNode = stack;
builder.removeStack();
return properties;
}
/**
* This method is overwritten since the node type is inferred based on the loop configuration.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The node type.
*/
getNodeType( builder ) {
const { returnsNode } = this.getProperties( builder );
return returnsNode ? returnsNode.getNodeType( builder ) : 'void';
}
setup( builder ) {
// setup properties
this.getProperties( builder );
}
generate( builder ) {
const properties = this.getProperties( builder );
const params = this.params;
const stackNode = properties.stackNode;
// Emit one `for` header per loop parameter (the last param is the body).
for ( let i = 0, l = params.length - 1; i < l; i ++ ) {
const param = params[ i ];
let start = null, end = null, name = null, type = null, condition = null, update = null;
if ( param.isNode ) {
// A bare node counts from 0 (inclusive) up to the node's value (exclusive).
type = 'int';
name = this.getVarName( i );
start = '0';
end = param.build( builder, type );
condition = '<';
} else {
type = param.type || 'int';
name = param.name || this.getVarName( i );
start = param.start;
end = param.end;
condition = param.condition;
update = param.update;
if ( typeof start === 'number' ) start = builder.generateConst( type, start );
else if ( start && start.isNode ) start = start.build( builder, type );
if ( typeof end === 'number' ) end = builder.generateConst( type, end );
else if ( end && end.isNode ) end = end.build( builder, type );
// Only `start` given: loop backwards from `start - 1` down to 0.
if ( start !== undefined && end === undefined ) {
start = start + ' - 1';
end = '0';
condition = '>=';
} else if ( end !== undefined && start === undefined ) {
start = '0';
condition = '<';
}
// No explicit condition: infer the direction from the numeric bounds.
if ( condition === undefined ) {
if ( Number( start ) > Number( end ) ) {
condition = '>=';
} else {
condition = '<';
}
}
}
const internalParam = { start, end, condition };
//
const startSnippet = internalParam.start;
const endSnippet = internalParam.end;
let declarationSnippet = '';
let conditionalSnippet = '';
let updateSnippet = '';
// Default update: increment/decrement by one, matching the loop direction
// and using a float literal for non-integer loop variables.
if ( ! update ) {
if ( type === 'int' || type === 'uint' ) {
if ( condition.includes( '<' ) ) update = '++';
else update = '--';
} else {
if ( condition.includes( '<' ) ) update = '+= 1.';
else update = '-= 1.';
}
}
declarationSnippet += builder.getVar( type, name ) + ' = ' + startSnippet;
conditionalSnippet += name + ' ' + condition + ' ' + endSnippet;
updateSnippet += name + ' ' + update;
const forSnippet = `for ( ${ declarationSnippet }; ${ conditionalSnippet }; ${ updateSnippet } )`;
builder.addFlowCode( ( i === 0 ? '\n' : '' ) + builder.tab + forSnippet + ' {\n\n' ).addFlowTab();
}
// Emit the loop body once, then close all nested `for` blocks.
const stackSnippet = stackNode.build( builder, 'void' );
const returnsSnippet = properties.returnsNode ? properties.returnsNode.build( builder ) : '';
builder.removeFlowTab().addFlowCode( '\n' + builder.tab + stackSnippet );
for ( let i = 0, l = this.params.length - 1; i < l; i ++ ) {
builder.addFlowCode( ( i === 0 ? '' : builder.tab ) + '}\n\n' ).removeFlowTab();
}
builder.addFlowTab();
return returnsSnippet;
}
}
/**
 * TSL function for creating a loop node.
 *
 * @function
 * @param {...Any} params - A list of parameters.
 * @returns {LoopNode}
 */
const Loop = ( ...params ) => {

	const node = new LoopNode( nodeArray( params, 'int' ) );

	return nodeObject( node ).append();

};

/**
 * TSL function for creating a `Continue()` expression.
 *
 * @function
 * @returns {ExpressionNode}
 */
const Continue = () => {

	return expression( 'continue' ).append();

};

/**
 * TSL function for creating a `Break()` expression.
 *
 * @function
 * @returns {ExpressionNode}
 */
const Break = () => {

	return expression( 'break' ).append();

};

// Deprecated

/**
 * @function
 * @deprecated since r168. Use {@link Loop} instead.
 *
 * @param {...any} params
 * @returns {LoopNode}
 */
const loop = function ( ...params ) { // @deprecated, r168

	console.warn( 'TSL.LoopNode: loop() has been renamed to Loop().' );

	return Loop( ...params );

};
/** @module MorphNode **/
// Cache of morph data textures, keyed by geometry (see `getEntry()`).
const _morphTextures = /*@__PURE__*/ new WeakMap();
const _morphVec4 = /*@__PURE__*/ new Vector4();
// Samples one morph attribute for the current vertex from the morph data
// texture: `stride` is the number of texels per vertex, `offset` selects the
// attribute slot (0 = position, 1 = normal, 2 = color) and `depth` the morph
// target layer. The result is scaled by the morph influence.
const getMorph = /*@__PURE__*/ Fn( ( { bufferMap, influence, stride, width, depth, offset } ) => {
const texelIndex = int( vertexIndex ).mul( stride ).add( offset );
const y = texelIndex.div( width );
const x = texelIndex.sub( y.mul( width ) );
const bufferAttrib = textureLoad( bufferMap, ivec2( x, y ) ).depth( depth );
return bufferAttrib.mul( influence );
} );
// Returns (and lazily builds) the cached morph texture entry for a geometry.
// The entry is rebuilt when the number of morph targets changed and disposed
// together with the geometry.
function getEntry( geometry ) {
const hasMorphPosition = geometry.morphAttributes.position !== undefined;
const hasMorphNormals = geometry.morphAttributes.normal !== undefined;
const hasMorphColors = geometry.morphAttributes.color !== undefined;
// instead of using attributes, the WebGL 2 code path encodes morph targets
// into an array of data textures. Each layer represents a single morph target.
const morphAttribute = geometry.morphAttributes.position || geometry.morphAttributes.normal || geometry.morphAttributes.color;
const morphTargetsCount = ( morphAttribute !== undefined ) ? morphAttribute.length : 0;
let entry = _morphTextures.get( geometry );
if ( entry === undefined || entry.count !== morphTargetsCount ) {
if ( entry !== undefined ) entry.texture.dispose();
const morphTargets = geometry.morphAttributes.position || [];
const morphNormals = geometry.morphAttributes.normal || [];
const morphColors = geometry.morphAttributes.color || [];
// Number of texels per vertex: 1 = position only, 2 = + normal, 3 = + color.
let vertexDataCount = 0;
if ( hasMorphPosition === true ) vertexDataCount = 1;
if ( hasMorphNormals === true ) vertexDataCount = 2;
if ( hasMorphColors === true ) vertexDataCount = 3;
let width = geometry.attributes.position.count * vertexDataCount;
let height = 1;
const maxTextureSize = 4096; // @TODO: Use 'capabilities.maxTextureSize'
// Wrap into multiple rows when one row would exceed the texture size limit.
if ( width > maxTextureSize ) {
height = Math.ceil( width / maxTextureSize );
width = maxTextureSize;
}
const buffer = new Float32Array( width * height * 4 * morphTargetsCount );
const bufferTexture = new DataArrayTexture( buffer, width, height, morphTargetsCount );
bufferTexture.type = FloatType;
bufferTexture.needsUpdate = true;
// fill buffer (4 floats per texel, `vertexDataStride` floats per vertex)
const vertexDataStride = vertexDataCount * 4;
for ( let i = 0; i < morphTargetsCount; i ++ ) {
const morphTarget = morphTargets[ i ];
const morphNormal = morphNormals[ i ];
const morphColor = morphColors[ i ];
const offset = width * height * 4 * i;
for ( let j = 0; j < morphTarget.count; j ++ ) {
const stride = j * vertexDataStride;
if ( hasMorphPosition === true ) {
_morphVec4.fromBufferAttribute( morphTarget, j );
buffer[ offset + stride + 0 ] = _morphVec4.x;
buffer[ offset + stride + 1 ] = _morphVec4.y;
buffer[ offset + stride + 2 ] = _morphVec4.z;
buffer[ offset + stride + 3 ] = 0;
}
if ( hasMorphNormals === true ) {
_morphVec4.fromBufferAttribute( morphNormal, j );
buffer[ offset + stride + 4 ] = _morphVec4.x;
buffer[ offset + stride + 5 ] = _morphVec4.y;
buffer[ offset + stride + 6 ] = _morphVec4.z;
buffer[ offset + stride + 7 ] = 0;
}
if ( hasMorphColors === true ) {
_morphVec4.fromBufferAttribute( morphColor, j );
buffer[ offset + stride + 8 ] = _morphVec4.x;
buffer[ offset + stride + 9 ] = _morphVec4.y;
buffer[ offset + stride + 10 ] = _morphVec4.z;
buffer[ offset + stride + 11 ] = ( morphColor.itemSize === 4 ) ? _morphVec4.w : 1;
}
}
}
entry = {
count: morphTargetsCount,
texture: bufferTexture,
stride: vertexDataCount,
size: new Vector2( width, height )
};
_morphTextures.set( geometry, entry );
// Free the texture and drop the cache entry when the geometry is disposed.
function disposeTexture() {
bufferTexture.dispose();
_morphTextures.delete( geometry );
geometry.removeEventListener( 'dispose', disposeTexture );
}
geometry.addEventListener( 'dispose', disposeTexture );
}
return entry;
}
/**
* This node implements the vertex transformation shader logic which is required
* for morph target animation.
*
* @augments Node
*/
class MorphNode extends Node {
static get type() {
return 'MorphNode';
}
/**
* Constructs a new morph node.
*
* @param {Mesh} mesh - The mesh holding the morph targets.
*/
constructor( mesh ) {
super( 'void' );
/**
* The mesh holding the morph targets.
*
* @type {Mesh}
*/
this.mesh = mesh;
/**
* A uniform node which represents the morph base influence value.
*
* @type {UniformNode<float>}
*/
this.morphBaseInfluence = uniform( 1 );
/**
* The update type overwritten since morph nodes are updated per object.
*
* @type {String}
*/
this.updateType = NodeUpdateType.OBJECT;
}
/**
* Sets up the morph node by assigning the transformed vertex data to predefined node variables.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setup( builder ) {
const { geometry } = builder;
const hasMorphPosition = geometry.morphAttributes.position !== undefined;
const hasMorphNormals = geometry.hasAttribute( 'normal' ) && geometry.morphAttributes.normal !== undefined;
const morphAttribute = geometry.morphAttributes.position || geometry.morphAttributes.normal || geometry.morphAttributes.color;
const morphTargetsCount = ( morphAttribute !== undefined ) ? morphAttribute.length : 0;
// nodes
const { texture: bufferMap, stride, size } = getEntry( geometry );
// Scale the base attributes by the base influence before accumulating
// the morph deltas below.
if ( hasMorphPosition === true ) positionLocal.mulAssign( this.morphBaseInfluence );
if ( hasMorphNormals === true ) normalLocal.mulAssign( this.morphBaseInfluence );
const width = int( size.width );
Loop( morphTargetsCount, ( { i } ) => {
const influence = float( 0 ).toVar();
// Instanced morphs read the per-instance influence from the morph texture;
// otherwise the influence comes from `mesh.morphTargetInfluences`.
// NOTE(review): the texel column is offset by 1 ( `i.add( 1 )` ) — presumably
// column 0 holds other data (e.g. the base influence); confirm upstream.
if ( this.mesh.count > 1 && ( this.mesh.morphTexture !== null && this.mesh.morphTexture !== undefined ) ) {
influence.assign( textureLoad( this.mesh.morphTexture, ivec2( int( i ).add( 1 ), int( instanceIndex ) ) ).r );
} else {
influence.assign( reference( 'morphTargetInfluences', 'float' ).element( i ).toVar() );
}
if ( hasMorphPosition === true ) {
positionLocal.addAssign( getMorph( {
bufferMap,
influence,
stride,
width,
depth: i,
offset: int( 0 )
} ) );
}
if ( hasMorphNormals === true ) {
normalLocal.addAssign( getMorph( {
bufferMap,
influence,
stride,
width,
depth: i,
offset: int( 1 )
} ) );
}
} );
}
/**
* Updates the state of the morphed mesh by updating the base influence.
* For relative morph targets the base contribution stays at `1`; otherwise
* it is reduced by the sum of all morph target influences.
*
* @param {NodeFrame} frame - The current node frame (unused).
*/
update( /*frame*/ ) {
const morphBaseInfluence = this.morphBaseInfluence;
if ( this.mesh.geometry.morphTargetsRelative ) {
morphBaseInfluence.value = 1;
} else {
morphBaseInfluence.value = 1 - this.mesh.morphTargetInfluences.reduce( ( a, b ) => a + b, 0 );
}
}
}
/**
* TSL function for creating a morph node.
*
* @function
* @param {Mesh} mesh - The mesh holding the morph targets.
* @returns {MorphNode}
*/
const morphReference = /*@__PURE__*/ nodeProxy( MorphNode );
/**
 * Base class for lighting nodes.
 *
 * @augments Node
 */
class LightingNode extends Node {

	static get type() {

		return 'LightingNode';

	}

	/**
	 * Constructs a new lighting node. The node type is fixed to `vec3`.
	 */
	constructor() {

		super( 'vec3' );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isLightingNode = true;

	}

}
/**
 * A generic class that can be used by nodes which contribute
 * ambient occlusion to the scene. E.g. an ambient occlusion map
 * node can be used as input for this module. Used in {@link NodeMaterial}.
 *
 * @augments LightingNode
 */
class AONode extends LightingNode {

	static get type() {

		return 'AONode';

	}

	/**
	 * Constructs a new AO node.
	 *
	 * @param {Node<float>?} [aoNode=null] - The ambient occlusion node.
	 */
	constructor( aoNode = null ) {

		super();

		/**
		 * The ambient occlusion node.
		 *
		 * @type {Node<float>?}
		 * @default null
		 */
		this.aoNode = aoNode;

	}

	/**
	 * Applies the AO term by multiplying it into the lighting context's
	 * ambient occlusion factor.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 */
	setup( builder ) {

		const { ambientOcclusion } = builder.context;

		ambientOcclusion.mulAssign( this.aoNode );

	}

}
/**
 * `LightingContextNode` extends the {@link module:ContextNode~ContextNode} module
 * with lighting specific context data. It represents the runtime context of
 * {@link LightsNode}.
 *
 * @augments ContextNode
 */
class LightingContextNode extends ContextNode {
	static get type() {
		return 'LightingContextNode';
	}
	/**
	 * Constructs a new lighting context node.
	 *
	 * @param {LightsNode} node - The lights node.
	 * @param {LightingModel?} [lightingModel=null] - The current lighting model.
	 * @param {Node<vec3>?} [backdropNode=null] - A backdrop node.
	 * @param {Node<float>?} [backdropAlphaNode=null] - A backdrop alpha node.
	 */
	constructor( node, lightingModel = null, backdropNode = null, backdropAlphaNode = null ) {
		super( node );
		/**
		 * The current lighting model.
		 *
		 * @type {LightingModel?}
		 * @default null
		 */
		this.lightingModel = lightingModel;
		/**
		 * A backdrop node.
		 *
		 * @type {Node<vec3>?}
		 * @default null
		 */
		this.backdropNode = backdropNode;
		/**
		 * A backdrop alpha node.
		 *
		 * @type {Node<float>?}
		 * @default null
		 */
		this.backdropAlphaNode = backdropAlphaNode;
		// Lazily created lighting context object, see getContext()/setup().
		this._value = null;
	}
	/**
	 * Returns a lighting context object.
	 *
	 * @return {{
	 * radiance: Node<vec3>,
	 * irradiance: Node<vec3>,
	 * iblIrradiance: Node<vec3>,
	 * ambientOcclusion: Node<float>,
	 * reflectedLight: {directDiffuse: Node<vec3>, directSpecular: Node<vec3>, indirectDiffuse: Node<vec3>, indirectSpecular: Node<vec3>},
	 * backdrop: Node<vec3>,
	 * backdropAlpha: Node<float>
	 * }} The lighting context object.
	 */
	getContext() {
		const { backdropNode, backdropAlphaNode } = this;
		const reflectedLight = {
			directDiffuse: vec3().toVar( 'directDiffuse' ),
			directSpecular: vec3().toVar( 'directSpecular' ),
			indirectDiffuse: vec3().toVar( 'indirectDiffuse' ),
			indirectSpecular: vec3().toVar( 'indirectSpecular' )
		};
		return {
			radiance: vec3().toVar( 'radiance' ),
			irradiance: vec3().toVar( 'irradiance' ),
			iblIrradiance: vec3().toVar( 'iblIrradiance' ),
			ambientOcclusion: float( 1 ).toVar( 'ambientOcclusion' ),
			reflectedLight,
			backdrop: backdropNode,
			backdropAlpha: backdropAlphaNode
		};
	}
	setup( builder ) {
		// Create the context object once and reuse it on subsequent setups.
		if ( this._value === null ) this._value = this.getContext();
		this._value.lightingModel = this.lightingModel || builder.context.lightingModel;
		this.value = this._value;
		return super.setup( builder );
	}
}
/**
 * TSL function for creating a lighting context node.
 *
 * @function
 * @param {LightsNode} node - The lights node.
 * @param {LightingModel?} [lightingModel=null] - The current lighting model.
 * @param {Node<vec3>?} [backdropNode=null] - A backdrop node.
 * @param {Node<float>?} [backdropAlphaNode=null] - A backdrop alpha node.
 * @returns {LightingContextNode}
 */
const lightingContext = /*@__PURE__*/ nodeProxy( LightingContextNode );
/**
 * A generic lighting node that adds irradiance to the scene. E.g. a light
 * map node can be used as input for this module. Used in {@link NodeMaterial}.
 *
 * @augments LightingNode
 */
class IrradianceNode extends LightingNode {
	static get type() {
		return 'IrradianceNode';
	}
	/**
	 * Constructs a new irradiance node.
	 *
	 * @param {Node<vec3>} node - A node contributing irradiance.
	 */
	constructor( node ) {
		super();
		/**
		 * A node contributing irradiance.
		 *
		 * @type {Node<vec3>}
		 */
		this.node = node;
	}
	/**
	 * Accumulates this node's irradiance into the builder's lighting context.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 */
	setup( builder ) {
		builder.context.irradiance.addAssign( this.node );
	}
}
/** @module ScreenNode **/
// Lazily created shared vectors holding the current drawing buffer size and
// viewport rectangle (see ScreenNode#setup / ScreenNode#update).
// One declaration per `let` for clarity.
let screenSizeVec;
let viewportVec;
/**
 * This node provides a collection of screen related metrics.
 * Depending on {@link module:ScreenNode~ScreenNode#scope}, the nodes can represent
 * resolution or viewport data as well as fragment or uv coordinates.
 *
 * @augments Node
 */
class ScreenNode extends Node {
static get type() {
return 'ScreenNode';
}
/**
 * Constructs a new screen node.
 *
 * @param {('coordinate'|'viewport'|'size'|'uv')} scope - The node's scope.
 */
constructor( scope ) {
super();
/**
 * The node represents different metric depending on which scope is selected.
 *
 * - `ScreenNode.COORDINATE`: Window-relative coordinates of the current fragment according to WebGPU standards.
 * - `ScreenNode.VIEWPORT`: The current viewport defined as a four-dimensional vector.
 * - `ScreenNode.SIZE`: The dimensions of the current bound framebuffer.
 * - `ScreenNode.UV`: Normalized coordinates.
 *
 * @type {('coordinate'|'viewport'|'size'|'uv')}
 */
this.scope = scope;
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isViewportNode = true;
}
/**
 * This method is overwritten since the node type depends on the selected scope.
 *
 * @return {('vec2'|'vec4')} The node type.
 */
getNodeType() {
// only the viewport scope is four-dimensional (x, y, width, height)
if ( this.scope === ScreenNode.VIEWPORT ) return 'vec4';
else return 'vec2';
}
/**
 * This method is overwritten since the node's update type depends on the selected scope.
 * Only the size and viewport scopes require a per-render refresh, see {@link ScreenNode#update}.
 *
 * @return {NodeUpdateType} The update type.
 */
getUpdateType() {
let updateType = NodeUpdateType.NONE;
if ( this.scope === ScreenNode.SIZE || this.scope === ScreenNode.VIEWPORT ) {
updateType = NodeUpdateType.RENDER;
}
// cache the computed update type on the node instance
this.updateType = updateType;
return updateType;
}
/**
 * `ScreenNode` implements {@link Node#update} to retrieve viewport and size information
 * from the current renderer. The values are written into the shared module-level
 * vectors which back the uniforms created in {@link ScreenNode#setup}.
 *
 * @param {NodeFrame} frame - A reference to the current node frame.
 */
update( { renderer } ) {
const renderTarget = renderer.getRenderTarget();
if ( this.scope === ScreenNode.VIEWPORT ) {
if ( renderTarget !== null ) {
// render target viewports are used as-is (no pixel ratio applied, unlike the else branch)
viewportVec.copy( renderTarget.viewport );
} else {
renderer.getViewport( viewportVec );
// convert CSS pixels to physical pixels
viewportVec.multiplyScalar( renderer.getPixelRatio() );
}
} else {
// SIZE scope (the only other scope with a RENDER update type, see getUpdateType())
if ( renderTarget !== null ) {
screenSizeVec.width = renderTarget.width;
screenSizeVec.height = renderTarget.height;
} else {
renderer.getDrawingBufferSize( screenSizeVec );
}
}
}
/**
 * Builds the output node for the selected scope. The size and viewport scopes are
 * backed by shared, lazily created vectors that are refreshed in {@link ScreenNode#update}.
 *
 * @return {Node} The output node.
 */
setup( /*builder*/ ) {
const scope = this.scope;
let output = null;
if ( scope === ScreenNode.SIZE ) {
output = uniform( screenSizeVec || ( screenSizeVec = new Vector2() ) );
} else if ( scope === ScreenNode.VIEWPORT ) {
output = uniform( viewportVec || ( viewportVec = new Vector4() ) );
} else {
// normalized screen coordinates (UV scope)
output = vec2( screenCoordinate.div( screenSize ) );
}
return output;
}
generate( builder ) {
// the coordinate scope bypasses the regular setup output and emits the
// backend's fragment coordinate directly
if ( this.scope === ScreenNode.COORDINATE ) {
let coord = builder.getFragCoord();
if ( builder.isFlipY() ) {
// follow webgpu standards
const size = builder.getNodeProperties( screenSize ).outputNode.build( builder );
coord = `${ builder.getType( 'vec2' ) }( ${ coord }.x, ${ size }.y - ${ coord }.y )`;
}
return coord;
}
return super.generate( builder );
}
}
// Scope constants, see ScreenNode#scope.
ScreenNode.COORDINATE = 'coordinate';
ScreenNode.VIEWPORT = 'viewport';
ScreenNode.SIZE = 'size';
ScreenNode.UV = 'uv';
// Screen
/**
 * TSL object that represents normalized screen coordinates, unitless in `[0, 1]`.
 *
 * @type {ScreenNode<vec2>}
 */
const screenUV = /*@__PURE__*/ nodeImmutable( ScreenNode, ScreenNode.UV );
/**
 * TSL object that represents the screen resolution in physical pixel units.
 *
 * @type {ScreenNode<vec2>}
 */
const screenSize = /*@__PURE__*/ nodeImmutable( ScreenNode, ScreenNode.SIZE );
/**
 * TSL object that represents the current `x`/`y` pixel position on the screen in physical pixel units.
 *
 * @type {ScreenNode<vec2>}
 */
const screenCoordinate = /*@__PURE__*/ nodeImmutable( ScreenNode, ScreenNode.COORDINATE );
// Viewport
/**
 * TSL object that represents the viewport rectangle as `x`, `y`, `width` and `height` in physical pixel units.
 *
 * @type {ScreenNode<vec4>}
 */
const viewport = /*@__PURE__*/ nodeImmutable( ScreenNode, ScreenNode.VIEWPORT );
/**
 * TSL object that represents the viewport resolution in physical pixel units.
 * Derived from the `zw` components of the viewport node.
 *
 * @type {ScreenNode<vec2>}
 */
const viewportSize = viewport.zw;
/**
 * TSL object that represents the current `x`/`y` pixel position on the viewport in physical pixel units.
 *
 * @type {ScreenNode<vec2>}
 */
const viewportCoordinate = /*@__PURE__*/ screenCoordinate.sub( viewport.xy );
/**
 * TSL object that represents normalized viewport coordinates, unitless in `[0, 1]`.
 *
 * @type {ScreenNode<vec2>}
 */
const viewportUV = /*@__PURE__*/ viewportCoordinate.div( viewportSize );
// Deprecated
/**
 * @deprecated since r169. Use {@link screenSize} instead.
 */
const viewportResolution = /*@__PURE__*/ ( Fn( () => { // @deprecated, r169
console.warn( 'TSL.ViewportNode: "viewportResolution" is deprecated. Use "screenSize" instead.' );
return screenSize;
}, 'vec2' ).once() )();
/**
 * @deprecated since r168. Use {@link screenUV} instead.
 */
const viewportTopLeft = /*@__PURE__*/ ( Fn( () => { // @deprecated, r168
console.warn( 'TSL.ViewportNode: "viewportTopLeft" is deprecated. Use "screenUV" instead.' );
return screenUV;
}, 'vec2' ).once() )();
/**
 * @deprecated since r168. Use `screenUV.flipY()` instead.
 */
const viewportBottomLeft = /*@__PURE__*/ ( Fn( () => { // @deprecated, r168
console.warn( 'TSL.ViewportNode: "viewportBottomLeft" is deprecated. Use "screenUV.flipY()" instead.' );
return screenUV.flipY();
}, 'vec2' ).once() )();
/** @module ViewportTextureNode **/
// Shared scratch vector for querying the drawing buffer size each frame.
const _size$4 = /*@__PURE__*/ new Vector2();
/**
 * A special type of texture node which represents the data of the current viewport
 * as a texture. The module extracts data from the current bound framebuffer with
 * a copy operation so no extra render pass is required to produce the texture data
 * (which is good for performance). `ViewportTextureNode` can be used as an input for a
 * variety of effects like refractive or transmissive materials.
 *
 * @augments module:TextureNode~TextureNode
 */
class ViewportTextureNode extends TextureNode {
	static get type() {
		return 'ViewportTextureNode';
	}
	/**
	 * Constructs a new viewport texture node.
	 *
	 * @param {Node} [uvNode=screenUV] - The uv node.
	 * @param {Node?} [levelNode=null] - The level node.
	 * @param {Texture?} [framebufferTexture=null] - A framebuffer texture holding the viewport data. If not provided, a framebuffer texture is created automatically.
	 */
	constructor( uvNode = screenUV, levelNode = null, framebufferTexture = null ) {
		if ( framebufferTexture === null ) {
			framebufferTexture = new FramebufferTexture();
			framebufferTexture.minFilter = LinearMipmapLinearFilter;
		}
		super( framebufferTexture, uvNode, levelNode );
		/**
		 * Whether to generate mipmaps or not.
		 *
		 * @type {Boolean}
		 * @default false
		 */
		this.generateMipmaps = false;
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isOutputTextureNode = true;
		/**
		 * The framebuffer copy is refreshed once per frame in
		 * {@link ViewportTextureNode#updateBefore}.
		 *
		 * @type {String}
		 * @default 'frame'
		 */
		this.updateBeforeType = NodeUpdateType.FRAME;
	}
	/**
	 * Resizes the framebuffer texture if necessary and copies the current
	 * framebuffer contents into it.
	 *
	 * @param {NodeFrame} frame - The current node frame.
	 */
	updateBefore( frame ) {
		const { renderer } = frame;
		renderer.getDrawingBufferSize( _size$4 );
		const texture = this.value;
		const image = texture.image;
		// keep the texture dimensions in sync with the drawing buffer
		if ( image.width !== _size$4.width || image.height !== _size$4.height ) {
			image.width = _size$4.width;
			image.height = _size$4.height;
			texture.needsUpdate = true;
		}
		// temporarily apply this node's mipmap setting for the copy operation
		const previousGenerateMipmaps = texture.generateMipmaps;
		texture.generateMipmaps = this.generateMipmaps;
		renderer.copyFramebufferToTexture( texture );
		texture.generateMipmaps = previousGenerateMipmaps;
	}
	/**
	 * Clones the node, sharing the underlying framebuffer texture.
	 *
	 * @return {ViewportTextureNode} The cloned node.
	 */
	clone() {
		const clonedNode = new this.constructor( this.uvNode, this.levelNode, this.value );
		clonedNode.generateMipmaps = this.generateMipmaps;
		return clonedNode;
	}
}
/**
 * TSL function for creating a viewport texture node.
 *
 * @function
 * @param {Node} [uvNode=screenUV] - The uv node.
 * @param {Node?} [levelNode=null] - The level node.
 * @param {Texture?} [framebufferTexture=null] - A framebuffer texture holding the viewport data. If not provided, a framebuffer texture is created automatically.
 * @returns {ViewportTextureNode}
 */
const viewportTexture = /*@__PURE__*/ nodeProxy( ViewportTextureNode );
/**
 * TSL function for creating a viewport texture node with enabled mipmap generation.
 *
 * @function
 * @param {Node} [uvNode=screenUV] - The uv node.
 * @param {Node?} [levelNode=null] - The level node.
 * @param {Texture?} [framebufferTexture=null] - A framebuffer texture holding the viewport data. If not provided, a framebuffer texture is created automatically.
 * @returns {ViewportTextureNode}
 */
const viewportMipTexture = /*@__PURE__*/ nodeProxy( ViewportTextureNode, null, null, { generateMipmaps: true } );
/** @module ViewportDepthTextureNode **/
// Lazily created depth texture shared by all viewport depth texture nodes.
let sharedDepthbuffer = null;
/**
 * Represents the depth of the current viewport as a texture. This module
 * can be used in combination with viewport texture to achieve effects
 * that require depth evaluation.
 *
 * @augments module:ViewportTextureNode~ViewportTextureNode
 */
class ViewportDepthTextureNode extends ViewportTextureNode {
	static get type() {
		return 'ViewportDepthTextureNode';
	}
	/**
	 * Constructs a new viewport depth texture node.
	 *
	 * @param {Node} [uvNode=screenUV] - The uv node.
	 * @param {Node?} [levelNode=null] - The level node.
	 */
	constructor( uvNode = screenUV, levelNode = null ) {
		// all instances share a single depth texture, created on first use
		if ( sharedDepthbuffer === null ) sharedDepthbuffer = new DepthTexture();
		super( uvNode, levelNode, sharedDepthbuffer );
	}
}
/**
 * TSL function for creating a viewport depth texture node.
 *
 * @function
 * @param {Node} [uvNode=screenUV] - The uv node.
 * @param {Node?} [levelNode=null] - The level node.
 * @returns {ViewportDepthTextureNode}
 */
const viewportDepthTexture = /*@__PURE__*/ nodeProxy( ViewportDepthTextureNode );
/** @module ViewportDepthNode **/
/**
 * This node offers a collection of features in context of the depth logic in the fragment shader.
 * Depending on {@link ViewportDepthNode#scope}, it can be used to define a depth value for the current
 * fragment or for depth evaluation purposes.
 *
 * @augments Node
 */
class ViewportDepthNode extends Node {
	static get type() {
		return 'ViewportDepthNode';
	}
	/**
	 * Constructs a new viewport depth node.
	 *
	 * @param {('depth'|'depthBase'|'linearDepth')} scope - The node's scope.
	 * @param {Node?} [valueNode=null] - The value node.
	 */
	constructor( scope, valueNode = null ) {
		super( 'float' );
		/**
		 * The node behaves differently depending on which scope is selected.
		 *
		 * - `ViewportDepthNode.DEPTH_BASE`: Allows to define a value for the current fragment's depth.
		 * - `ViewportDepthNode.DEPTH`: Represents the depth value for the current fragment (`valueNode` is ignored).
		 * - `ViewportDepthNode.LINEAR_DEPTH`: Represents the linear (orthographic) depth value of the current fragment.
		 * If a `valueNode` is set, the scope can be used to convert perspective depth data to linear data.
		 *
		 * @type {('depth'|'depthBase'|'linearDepth')}
		 */
		this.scope = scope;
		/**
		 * Can be used to define a custom depth value.
		 * The property is ignored in the `ViewportDepthNode.DEPTH` scope.
		 *
		 * @type {Node?}
		 * @default null
		 */
		this.valueNode = valueNode;
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isViewportDepthNode = true;
	}
	generate( builder ) {
		// the base scope maps directly onto the built-in fragment depth output
		if ( this.scope === ViewportDepthNode.DEPTH_BASE ) {
			return builder.getFragDepth();
		}
		return super.generate( builder );
	}
	setup( { camera } ) {
		const { scope, valueNode } = this;
		let node = null;
		if ( scope === ViewportDepthNode.DEPTH_BASE ) {
			// write a custom value into the fragment depth (no-op when no value is given)
			if ( valueNode !== null ) {
				node = depthBase().assign( valueNode );
			}
		} else if ( scope === ViewportDepthNode.DEPTH ) {
			// derive the fragment's depth from its view space position
			node = camera.isPerspectiveCamera
				? viewZToPerspectiveDepth( positionView.z, cameraNear, cameraFar )
				: viewZToOrthographicDepth( positionView.z, cameraNear, cameraFar );
		} else if ( scope === ViewportDepthNode.LINEAR_DEPTH ) {
			if ( valueNode === null ) {
				node = viewZToOrthographicDepth( positionView.z, cameraNear, cameraFar );
			} else if ( camera.isPerspectiveCamera ) {
				// convert the given perspective depth into linear (orthographic) depth
				const viewZ = perspectiveDepthToViewZ( valueNode, cameraNear, cameraFar );
				node = viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
			} else {
				// orthographic depth is already linear
				node = valueNode;
			}
		}
		return node;
	}
}
// Scope constants, see ViewportDepthNode#scope.
ViewportDepthNode.DEPTH_BASE = 'depthBase';
ViewportDepthNode.DEPTH = 'depth';
ViewportDepthNode.LINEAR_DEPTH = 'linearDepth';
// NOTE: viewZ, the z-coordinate in camera space, is negative for points in front of the camera
/**
 * TSL function for converting a viewZ value to an orthographic depth value.
 * Computes `( viewZ + near ) / ( near - far )`.
 *
 * @function
 * @param {Node<float>} viewZ - The viewZ node (negative in front of the camera).
 * @param {Node<float>} near - The camera's near value.
 * @param {Node<float>} far - The camera's far value.
 * @returns {Node<float>}
 */
const viewZToOrthographicDepth = ( viewZ, near, far ) => {
	const nearFarRange = near.sub( far );
	return viewZ.add( near ).div( nearFarRange );
};
/**
 * TSL function for converting an orthographic depth value to a viewZ value.
 * Computes `( near - far ) * depth - near` (the inverse of viewZToOrthographicDepth).
 *
 * @function
 * @param {Node<float>} depth - The orthographic depth.
 * @param {Node<float>} near - The camera's near value.
 * @param {Node<float>} far - The camera's far value.
 * @returns {Node<float>} The (negative) viewZ.
 */
const orthographicDepthToViewZ = ( depth, near, far ) => {
	const nearFarRange = near.sub( far );
	return nearFarRange.mul( depth ).sub( near );
};
/**
 * TSL function for converting a viewZ value to a perspective depth value.
 * Computes `( near + viewZ ) * far / ( ( far - near ) * viewZ )`.
 *
 * Note: {link https://twitter.com/gonnavis/status/1377183786949959682}.
 *
 * @function
 * @param {Node<float>} viewZ - The viewZ node (negative in front of the camera).
 * @param {Node<float>} near - The camera's near value.
 * @param {Node<float>} far - The camera's far value.
 * @returns {Node<float>}
 */
const viewZToPerspectiveDepth = ( viewZ, near, far ) => {
	const numerator = near.add( viewZ ).mul( far );
	const denominator = far.sub( near ).mul( viewZ );
	return numerator.div( denominator );
};
/**
 * TSL function for converting a perspective depth value to a viewZ value.
 * Computes `near * far / ( ( far - near ) * depth - far )` (the inverse of
 * viewZToPerspectiveDepth).
 *
 * @function
 * @param {Node<float>} depth - The perspective depth.
 * @param {Node<float>} near - The camera's near value.
 * @param {Node<float>} far - The camera's far value.
 * @returns {Node<float>} The (negative) viewZ.
 */
const perspectiveDepthToViewZ = ( depth, near, far ) => {
	const denominator = far.sub( near ).mul( depth ).sub( far );
	return near.mul( far ).div( denominator );
};
/**
 * TSL function for converting a viewZ value to a logarithmic depth value.
 *
 * @function
 * @param {Node<float>} viewZ - The viewZ node (must be negative, see note below).
 * @param {Node<float>} near - The camera's near value.
 * @param {Node<float>} far - The camera's far value.
 * @returns {Node<float>}
 */
const viewZToLogarithmicDepth = ( viewZ, near, far ) => {
	// The formula used here is adapted from one described by Thatcher Ulrich
	// (see http://tulrich.com/geekstuff/log_depth_buffer.txt):
	//
	//     z = K * log( w / cameraNear ) / log( cameraFar / cameraNear )
	//
	// where K = 2^k - 1 and k is the number of bits in the depth buffer.
	// It improves upon an earlier Outerra formula
	// (https://outerra.blogspot.com/2009/08/logarithmic-z-buffer.html) whose
	// "C-constant" variant ignores the camera near plane (it is assumed to be 0,
	// see https://outerra.blogspot.com/2012/11/maximizing-depth-buffer-range-and.html
	// and https://outerra.blogspot.com/2013/07/logarithmic-depth-buffer-optimizations.html).
	// Ulrich's "near plane" variant was chosen because it has constant relative
	// precision over the whole near-far range.
	//
	// Four changes are made to Ulrich's formula here:
	// 1. The camera near plane is clamped so we don't divide by 0.
	// 2. log2 is used instead of log to avoid an extra multiply (shaders implement log using log2).
	// 3. K is assumed to be 1 (K = maximum value in the depth buffer).
	// 4. For consistency with "viewZToOrthographicDepth" and "viewZToPerspectiveDepth",
	//    a negative 'viewZ' is expected instead of 'w', hence the 'negate()' call.
	//
	// For a visual representation of this depth curve, see https://www.desmos.com/calculator/uyqk0vex1u
	const clampedNear = near.max( 1e-6 ).toVar();
	const numerator = log2( viewZ.negate().div( clampedNear ) );
	const denominator = log2( far.div( clampedNear ) );
	return numerator.div( denominator );
};
/**
 * TSL function for converting a logarithmic depth value to a viewZ value
 * (the inverse of viewZToLogarithmicDepth).
 *
 * @function
 * @param {Node<float>} depth - The logarithmic depth.
 * @param {Node<float>} near - The camera's near value.
 * @param {Node<float>} far - The camera's far value.
 * @returns {Node<float>} The (negative) viewZ.
 */
const logarithmicDepthToViewZ = ( depth, near, far ) => {
	// The result is negated for consistency with "orthographicDepthToViewZ"
	// and "perspectiveDepthToViewZ", which also return a negative viewZ.
	const scale = log( far.div( near ) );
	const exponent = depth.mul( scale );
	return float( Math.E ).pow( exponent ).mul( near ).negate();
};
/**
 * TSL function for defining a value for the current fragment's depth.
 *
 * @function
 * @param {Node<float>} value - The depth value to set.
 * @returns {ViewportDepthNode<float>}
 */
const depthBase = /*@__PURE__*/ nodeProxy( ViewportDepthNode, ViewportDepthNode.DEPTH_BASE );
/**
 * TSL object that represents the depth value for the current fragment.
 *
 * @type {ViewportDepthNode}
 */
const depth = /*@__PURE__*/ nodeImmutable( ViewportDepthNode, ViewportDepthNode.DEPTH );
/**
 * TSL function for converting a perspective depth value to linear depth.
 *
 * @function
 * @param {Node<float>} value - The perspective depth.
 * @returns {ViewportDepthNode<float>}
 */
const linearDepth = /*@__PURE__*/ nodeProxy( ViewportDepthNode, ViewportDepthNode.LINEAR_DEPTH );
/**
 * TSL object that represents the linear (orthographic) depth value of the current fragment
 *
 * @type {ViewportDepthNode}
 */
const viewportLinearDepth = /*@__PURE__*/ linearDepth( viewportDepthTexture() );
// Enables the TSL syntax `depth.assign( customDepth )` by routing assignments
// through depthBase(), which emits the built-in fragment depth output.
depth.assign = ( value ) => depthBase( value );
/** @module BuiltinNode **/
/**
 * The node allows to set values for built-in shader variables. That is
 * required for features like hardware-accelerated vertex clipping.
 *
 * @augments Node
 */
class BuiltinNode extends Node {
	// Added for consistency: every other node class in this file exposes a
	// static `type` getter with the class name.
	static get type() {
		return 'BuiltinNode';
	}
	/**
	 * Constructs a new builtin node.
	 *
	 * @param {String} name - The name of the built-in shader variable.
	 */
	constructor( name ) {
		super( 'float' );
		/**
		 * The name of the built-in shader variable.
		 *
		 * @type {String}
		 */
		this.name = name;
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isBuiltinNode = true;
	}
	/**
	 * Generates the code snippet of the builtin node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The generated code snippet (the variable name itself).
	 */
	generate( /* builder */ ) {
		return this.name;
	}
}
/**
 * TSL function for creating a builtin node.
 *
 * @function
 * @param {String} name - The name of the built-in shader variable.
 * @returns {BuiltinNode}
 */
const builtin = /*@__PURE__*/ nodeProxy( BuiltinNode );
/** @module ClippingNode **/
/**
* This node is used in {@link NodeMaterial} to setup the clipping
* which can happen hardware-accelerated (if supported) and optionally
* use alpha-to-coverage for anti-aliasing clipped edges.
*
* @augments Node
*/
class ClippingNode extends Node {
static get type() {
return 'ClippingNode';
}
/**
 * Constructs a new clipping node.
 *
 * @param {('default'|'hardware'|'alphaToCoverage')} [scope='default'] - The node's scope. Similar to other nodes,
 * the selected scope influences the behavior of the node and what type of code is generated.
 */
constructor( scope = ClippingNode.DEFAULT ) {
super();
/**
 * The node's scope. Similar to other nodes, the selected scope influences
 * the behavior of the node and what type of code is generated.
 *
 * @type {('default'|'hardware'|'alphaToCoverage')}
 */
this.scope = scope;
}
/**
 * Setups the node depending on the selected scope.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {Node} The result node.
 */
setup( builder ) {
super.setup( builder );
const clippingContext = builder.clippingContext;
const { intersectionPlanes, unionPlanes } = clippingContext;
// cache the material's hardware clipping flag; the setup*() methods below use
// it to skip software clipping of union planes that are handled in hardware
this.hardwareClipping = builder.material.hardwareClipping;
if ( this.scope === ClippingNode.ALPHA_TO_COVERAGE ) {
return this.setupAlphaToCoverage( intersectionPlanes, unionPlanes );
} else if ( this.scope === ClippingNode.HARDWARE ) {
return this.setupHardwareClipping( unionPlanes, builder );
} else {
return this.setupDefault( intersectionPlanes, unionPlanes );
}
}
/**
 * Setups alpha to coverage. Instead of discarding fragments at the clip planes,
 * the fragment alpha is faded out across the plane boundary for anti-aliased edges.
 *
 * @param {Array<Vector4>} intersectionPlanes - The intersection planes.
 * @param {Array<Vector4>} unionPlanes - The union planes.
 * @return {Node} The result node.
 */
setupAlphaToCoverage( intersectionPlanes, unionPlanes ) {
return Fn( () => {
const distanceToPlane = float().toVar( 'distanceToPlane' );
// NOTE(review): the debug label 'distanceToGradient' does not match the
// variable name `distanceGradient` — presumably a typo in the label only.
const distanceGradient = float().toVar( 'distanceToGradient' );
const clipOpacity = float( 1 ).toVar( 'clipOpacity' );
const numUnionPlanes = unionPlanes.length;
if ( this.hardwareClipping === false && numUnionPlanes > 0 ) {
const clippingPlanes = uniformArray( unionPlanes );
Loop( numUnionPlanes, ( { i } ) => {
const plane = clippingPlanes.element( i );
distanceToPlane.assign( positionView.dot( plane.xyz ).negate().add( plane.w ) );
// soften the clip edge by half the screen-space derivative on each side
distanceGradient.assign( distanceToPlane.fwidth().div( 2.0 ) );
clipOpacity.mulAssign( smoothstep( distanceGradient.negate(), distanceGradient, distanceToPlane ) );
} );
}
const numIntersectionPlanes = intersectionPlanes.length;
if ( numIntersectionPlanes > 0 ) {
const clippingPlanes = uniformArray( intersectionPlanes );
const intersectionClipOpacity = float( 1 ).toVar( 'intersectionClipOpacity' );
Loop( numIntersectionPlanes, ( { i } ) => {
const plane = clippingPlanes.element( i );
distanceToPlane.assign( positionView.dot( plane.xyz ).negate().add( plane.w ) );
distanceGradient.assign( distanceToPlane.fwidth().div( 2.0 ) );
intersectionClipOpacity.mulAssign( smoothstep( distanceGradient.negate(), distanceGradient, distanceToPlane ).oneMinus() );
} );
clipOpacity.mulAssign( intersectionClipOpacity.oneMinus() );
}
diffuseColor.a.mulAssign( clipOpacity );
// fully clipped fragments are discarded entirely
diffuseColor.a.equal( 0.0 ).discard();
} )();
}
/**
 * Setups the default clipping: fragments outside the clip planes are discarded.
 *
 * @param {Array<Vector4>} intersectionPlanes - The intersection planes.
 * @param {Array<Vector4>} unionPlanes - The union planes.
 * @return {Node} The result node.
 */
setupDefault( intersectionPlanes, unionPlanes ) {
return Fn( () => {
const numUnionPlanes = unionPlanes.length;
if ( this.hardwareClipping === false && numUnionPlanes > 0 ) {
const clippingPlanes = uniformArray( unionPlanes );
// a fragment is discarded as soon as it lies behind any union plane
Loop( numUnionPlanes, ( { i } ) => {
const plane = clippingPlanes.element( i );
positionView.dot( plane.xyz ).greaterThan( plane.w ).discard();
} );
}
const numIntersectionPlanes = intersectionPlanes.length;
if ( numIntersectionPlanes > 0 ) {
const clippingPlanes = uniformArray( intersectionPlanes );
// a fragment is discarded only if it lies behind all intersection planes
const clipped = bool( true ).toVar( 'clipped' );
Loop( numIntersectionPlanes, ( { i } ) => {
const plane = clippingPlanes.element( i );
clipped.assign( positionView.dot( plane.xyz ).greaterThan( plane.w ).and( clipped ) );
} );
clipped.discard();
}
} )();
}
/**
 * Setups hardware clipping by writing the signed plane distances into the
 * backend's built-in clip distance output.
 *
 * @param {Array<Vector4>} unionPlanes - The union planes.
 * @param {NodeBuilder} builder - The current node builder.
 * @return {Node} The result node.
 */
setupHardwareClipping( unionPlanes, builder ) {
const numUnionPlanes = unionPlanes.length;
builder.enableHardwareClipping( numUnionPlanes );
return Fn( () => {
const clippingPlanes = uniformArray( unionPlanes );
const hw_clip_distances = builtin( builder.getClipDistance() );
Loop( numUnionPlanes, ( { i } ) => {
const plane = clippingPlanes.element( i );
const distance = positionView.dot( plane.xyz ).sub( plane.w ).negate();
hw_clip_distances.element( i ).assign( distance );
} );
} )();
}
}
// Scope constants, see ClippingNode#scope.
ClippingNode.ALPHA_TO_COVERAGE = 'alphaToCoverage';
ClippingNode.DEFAULT = 'default';
ClippingNode.HARDWARE = 'hardware';
/**
 * TSL function for setting up the default clipping logic.
 *
 * @function
 * @returns {ClippingNode}
 */
const clipping = () => nodeObject( new ClippingNode() );
/**
 * TSL function for setting up alpha to coverage.
 *
 * @function
 * @returns {ClippingNode}
 */
const clippingAlpha = () => nodeObject( new ClippingNode( ClippingNode.ALPHA_TO_COVERAGE ) );
/**
 * TSL function for setting up hardware-based clipping.
 *
 * @function
 * @returns {ClippingNode}
 */
const hardwareClipping = () => nodeObject( new ClippingNode( ClippingNode.HARDWARE ) );
// Hashed alpha testing.
// See: https://casual-effects.com/research/Wyman2017Hashed/index.html
const ALPHA_HASH_SCALE = 0.05; // Derived from trials only, and may be changed.
// 2D -> 1D hash producing a pseudo-random value.
const hash2D = /*@__PURE__*/ Fn( ( [ value ] ) => {
return fract( mul( 1.0e4, sin( mul( 17.0, value.x ).add( mul( 0.1, value.y ) ) ) ).mul( add( 0.1, abs( sin( mul( 13.0, value.y ).add( value.x ) ) ) ) ) );
} );
// 3D -> 1D hash built by chaining hash2D.
const hash3D = /*@__PURE__*/ Fn( ( [ value ] ) => {
return hash2D( vec2( hash2D( value.xy ), value.z ) );
} );
// Computes a spatially stable alpha threshold for hashed alpha testing
// from the given position, following Wyman & McGuire (2017).
const getAlphaHashThreshold = /*@__PURE__*/ Fn( ( [ position ] ) => {
// Find the discretized derivatives of our coordinates
const maxDeriv = max$1(
length( dFdx( position.xyz ) ),
length( dFdy( position.xyz ) )
);
const pixScale = float( 1 ).div( float( ALPHA_HASH_SCALE ).mul( maxDeriv ) ).toVar( 'pixScale' );
// Find two nearest log-discretized noise scales
const pixScales = vec2(
exp2( floor( log2( pixScale ) ) ),
exp2( ceil( log2( pixScale ) ) )
);
// Compute alpha thresholds at our two noise scales
const alpha = vec2(
hash3D( floor( pixScales.x.mul( position.xyz ) ) ),
hash3D( floor( pixScales.y.mul( position.xyz ) ) ),
);
// Factor to interpolate lerp with
const lerpFactor = fract( log2( pixScale ) );
// Interpolate alpha threshold from noise at two scales
const x = add( mul( lerpFactor.oneMinus(), alpha.x ), mul( lerpFactor, alpha.y ) );
// Pass into CDF to compute uniformly distrib threshold
const a = min$1( lerpFactor, lerpFactor.oneMinus() );
const cases = vec3(
x.mul( x ).div( mul( 2.0, a ).mul( sub( 1.0, a ) ) ),
x.sub( mul( 0.5, a ) ).div( sub( 1.0, a ) ),
sub( 1.0, sub( 1.0, x ).mul( sub( 1.0, x ) ).div( mul( 2.0, a ).mul( sub( 1.0, a ) ) ) ) );
// Find our final, uniformly distributed alpha threshold (ατ)
const threshold = x.lessThan( a.oneMinus() ).select( x.lessThan( a ).select( cases.x, cases.y ), cases.z );
// Avoids ατ == 0. Could also do ατ =1-ατ
return clamp( threshold, 1.0e-6, 1.0 );
} ).setLayout( {
name: 'getAlphaHashThreshold',
type: 'float',
inputs: [
{ name: 'position', type: 'vec3' }
]
} );
/**
* Base class for all node materials.
*
* @augments Material
*/
class NodeMaterial extends Material {
static get type() {
return 'NodeMaterial';
}
/**
* Represents the type of the node material.
*
* @type {String}
*/
get type() {
return this.constructor.type;
}
set type( _value ) { /* */ }
/**
* Constructs a new node material.
*/
constructor() {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isNodeMaterial = true;
/**
* Whether this material is affected by fog or not.
*
* @type {Boolean}
* @default true
*/
this.fog = true;
/**
* Whether this material is affected by lights or not.
*
* @type {Boolean}
* @default false
*/
this.lights = false;
/**
* Whether this material uses hardware clipping or not.
* This property is managed by the engine and should not be
* modified by apps.
*
* @type {Boolean}
* @default false
*/
this.hardwareClipping = false;
/**
* Node materials which set their `lights` property to `true`
* are affected by all lights of the scene. Sometimes selective
* lighting is wanted which means only _some_ lights in the scene
* affect a material. This can be achieved by creating an instance
* of {@link module:LightsNode~LightsNode} with a list of selective
* lights and assign the node to this property.
*
* ```js
* const customLightsNode = lights( [ light1, light2 ] );
* material.lightsNode = customLightsNode;
* ```
*
* @type {LightsNode?}
* @default null
*/
this.lightsNode = null;
/**
* The environment of node materials can be defined by an environment
* map assigned to the `envMap` property or by `Scene.environment`
* if the node material is a PBR material. This node property allows to overwrite
* the default behavior and define the environment with a custom node.
*
* ```js
* material.envNode = pmremTexture( renderTarget.texture );
* ```
*
* @type {Node<vec3>?}
* @default null
*/
this.envNode = null;
/**
* The lighting of node materials might be influenced by ambient occlusion.
* The default AO is inferred from an ambient occlusion map assigned to `aoMap`
* and the respective `aoMapIntensity`. This node property allows to overwrite
* the default and define the ambient occlusion with a custom node instead.
*
* If you don't want to overwrite the diffuse color but modify the existing
* values instead, use {@link module:MaterialNode.materialAO}.
*
* @type {Node<float>?}
* @default null
*/
this.aoNode = null;
/**
* The diffuse color of node materials is by default inferred from the
* `color` and `map` properties. This node property allows to overwrite the default
* and define the diffuse color with a node instead.
*
* ```js
* material.colorNode = color( 0xff0000 ); // define red color
* ```
*
* If you don't want to overwrite the diffuse color but modify the existing
* values instead, use {@link module:MaterialNode.materialColor}.
*
* ```js
* material.colorNode = materialColor.mul( color( 0xff0000 ) ); // give diffuse colors a red tint
* ```
*
* @type {Node<vec3>?}
* @default null
*/
this.colorNode = null;
/**
* The normals of node materials are by default inferred from the `normalMap`/`normalScale`
* or `bumpMap`/`bumpScale` properties. This node property allows to overwrite the default
* and define the normals with a node instead.
*
* If you don't want to overwrite the normals but modify the existing values instead,
* use {@link module:MaterialNode.materialNormal}.
*
* @type {Node<vec3>?}
* @default null
*/
this.normalNode = null;
/**
* The opacity of node materials is by default inferred from the `opacity`
* and `alphaMap` properties. This node property allows to overwrite the default
* and define the opacity with a node instead.
*
* If you don't want to overwrite the normals but modify the existing
* value instead, use {@link module:MaterialNode.materialOpacity}.
*
* @type {Node<float>?}
* @default null
*/
this.opacityNode = null;
/**
* This node can be used to to implement a variety of filter-like effects. The idea is
* to store the current rendering into a texture e.g. via `viewportSharedTexture()`, use it
* to create an arbitrary effect and then assign the node composition to this property.
* Everything behind the object using this material will now be affected by a filter.
*
* ```js
* const material = new NodeMaterial()
* material.transparent = true;
*
* // everything behind the object will be monochromatic
* material.backdropNode = saturation( viewportSharedTexture().rgb, 0 );
* ```
*
* Backdrop computations are part of the lighting so only lit materials can use this property.
*
* @type {Node<vec3>?}
* @default null
*/
this.backdropNode = null;
/**
* This node allows to modulate the influence of `backdropNode` to the outgoing light.
*
* @type {Node<float>?}
* @default null
*/
this.backdropAlphaNode = null;
/**
* The alpha test of node materials is by default inferred from the `alphaTest`
* property. This node property allows to overwrite the default and define the
* alpha test with a node instead.
*
* If you don't want to overwrite the alpha test but modify the existing
* value instead, use {@link module:MaterialNode.materialAlphaTest}.
*
* @type {Node<float>?}
* @default null
*/
this.alphaTestNode = null;
/**
* The local vertex positions are computed based on multiple factors like the
* attribute data, morphing or skinning. This node property allows to overwrite
* the default and define local vertex positions with nodes instead.
*
* If you don't want to overwrite the vertex positions but modify the existing
* values instead, use {@link module:Position.positionLocal}.
*
*```js
* material.positionNode = positionLocal.add( displace );
* ```
*
* @type {Node<vec3>?}
* @default null
*/
this.positionNode = null;
/**
* This node property is intended for logic which modifies geometry data once or per animation step.
* Apps usually place such logic randomly in initialization routines or in the animation loop.
* `geometryNode` is intended as a dedicated API so there is an intended spot where geometry modifications
* can be implemented.
*
* The idea is to assign a `Fn` definition that holds the geometry modification logic. A typical example
* would be a GPU based particle system that provides a node material for usage on app level. The particle
* simulation would be implemented as compute shaders and managed inside a `Fn` function. This function is
* eventually assigned to `geometryNode`.
*
* @type {Function}
* @default null
*/
this.geometryNode = null;
/**
* Allows to overwrite depth values in the fragment shader.
*
* @type {Node<float>?}
* @default null
*/
this.depthNode = null;
/**
* Allows to overwrite the position used for shadow map rendering which
* is by default {@link module:Position.positionWorld}, the vertex position
* in world space.
*
* @type {Node<float>?}
* @default null
*/
this.shadowPositionNode = null;
/**
* This node can be used to influence how an object using this node material
* receive shadows.
*
* ```js
* const totalShadows = float( 1 ).toVar();
* material.receivedShadowNode = Fn( ( [ shadow ] ) => {
* totalShadows.mulAssign( shadow );
* //return float( 1 ); // bypass received shadows
* return shadow.mix( color( 0xff0000 ), 1 ); // modify shadow color
* } );
*
* @type {Node<vec4>?}
* @default null
*/
this.receivedShadowNode = null;
/**
* This node can be used to influence how an object using this node material
* casts shadows. To apply a color to shadows, you can simply do:
*
* ```js
* material.castShadowNode = vec4( 1, 0, 0, 1 );
* ```
*
* Which can be nice to fake colored shadows of semi-transparent objects. It
* is also common to use the property with `Fn` function so checks are performed
* per fragment.
*
* ```js
* materialCustomShadow.castShadowNode = Fn( () => {
* hash( vertexIndex ).greaterThan( 0.5 ).discard();
* return materialColor;
* } )();
* ```
*
* @type {Node<vec4>?}
* @default null
*/
this.castShadowNode = null;
/**
* This node can be used to define the final output of the material.
*
* TODO: Explain the differences to `fragmentNode`.
*
* @type {Node<vec4>?}
* @default null
*/
this.outputNode = null;
/**
* MRT configuration is done on renderer or pass level. This node allows to
* overwrite what values are written into MRT targets on material level. This
* can be useful for implementing selective FX features that should only affect
* specific objects.
*
* @type {MRTNode?}
* @default null
*/
this.mrtNode = null;
/**
* This node property can be used if you need complete freedom in implementing
* the fragment shader. Assigning a node will replace the built-in material
* logic used in the fragment stage.
*
* @type {Node<vec4>?}
* @default null
*/
this.fragmentNode = null;
/**
* This node property can be used if you need complete freedom in implementing
* the vertex shader. Assigning a node will replace the built-in material logic
* used in the vertex stage.
*
* @type {Node<vec4>?}
* @default null
*/
this.vertexNode = null;
}
/**
* Allows to define a custom cache key that influence the material key computation
* for render objects.
*
* @return {String} The custom cache key.
*/
customProgramCacheKey() {
return this.type + getCacheKey$1( this );
}
/**
* Builds this material with the given node builder.
*
* @param {NodeBuilder} builder - The current node builder.
*/
build( builder ) {
this.setup( builder );
}
/**
* Setups a node material observer with the given builder.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {NodeMaterialObserver} The node material observer.
*/
setupObserver( builder ) {
return new NodeMaterialObserver( builder );
}
/**
* Setups the vertex and fragment stage of this node material.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setup( builder ) {
builder.context.setupNormal = () => this.setupNormal( builder );
builder.context.setupPositionView = () => this.setupPositionView( builder );
builder.context.setupModelViewProjection = () => this.setupModelViewProjection( builder );
const renderer = builder.renderer;
const renderTarget = renderer.getRenderTarget();
// < VERTEX STAGE >
builder.addStack();
const vertexNode = this.vertexNode || this.setupVertex( builder );
builder.stack.outputNode = vertexNode;
this.setupHardwareClipping( builder );
if ( this.geometryNode !== null ) {
builder.stack.outputNode = builder.stack.outputNode.bypass( this.geometryNode );
}
builder.addFlow( 'vertex', builder.removeStack() );
// < FRAGMENT STAGE >
builder.addStack();
let resultNode;
const clippingNode = this.setupClipping( builder );
if ( this.depthWrite === true || this.depthTest === true ) {
// only write depth if depth buffer is configured
if ( renderTarget !== null ) {
if ( renderTarget.depthBuffer === true ) this.setupDepth( builder );
} else {
if ( renderer.depth === true ) this.setupDepth( builder );
}
}
if ( this.fragmentNode === null ) {
this.setupDiffuseColor( builder );
this.setupVariants( builder );
const outgoingLightNode = this.setupLighting( builder );
if ( clippingNode !== null ) builder.stack.add( clippingNode );
// force unsigned floats - useful for RenderTargets
const basicOutput = vec4( outgoingLightNode, diffuseColor.a ).max( 0 );
resultNode = this.setupOutput( builder, basicOutput );
// OUTPUT NODE
output.assign( resultNode );
//
if ( this.outputNode !== null ) resultNode = this.outputNode;
// MRT
if ( renderTarget !== null ) {
const mrt = renderer.getMRT();
const materialMRT = this.mrtNode;
if ( mrt !== null ) {
resultNode = mrt;
if ( materialMRT !== null ) {
resultNode = mrt.merge( materialMRT );
}
} else if ( materialMRT !== null ) {
resultNode = materialMRT;
}
}
} else {
let fragmentNode = this.fragmentNode;
if ( fragmentNode.isOutputStructNode !== true ) {
fragmentNode = vec4( fragmentNode );
}
resultNode = this.setupOutput( builder, fragmentNode );
}
builder.stack.outputNode = resultNode;
builder.addFlow( 'fragment', builder.removeStack() );
// < OBSERVER >
builder.observer = this.setupObserver( builder );
}
/**
* Setups the clipping node.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ClippingNode} The clipping node.
*/
setupClipping( builder ) {
if ( builder.clippingContext === null ) return null;
const { unionPlanes, intersectionPlanes } = builder.clippingContext;
let result = null;
if ( unionPlanes.length > 0 || intersectionPlanes.length > 0 ) {
const samples = builder.renderer.samples;
if ( this.alphaToCoverage && samples > 1 ) {
// to be added to flow when the color/alpha value has been determined
result = clippingAlpha();
} else {
builder.stack.add( clipping() );
}
}
return result;
}
/**
* Setups the hardware clipping if available on the current device.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setupHardwareClipping( builder ) {
this.hardwareClipping = false;
if ( builder.clippingContext === null ) return;
const candidateCount = builder.clippingContext.unionPlanes.length;
// 8 planes supported by WebGL ANGLE_clip_cull_distance and WebGPU clip-distances
if ( candidateCount > 0 && candidateCount <= 8 && builder.isAvailable( 'clipDistance' ) ) {
builder.stack.add( hardwareClipping() );
this.hardwareClipping = true;
}
return;
}
/**
* Setups the depth of this material.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setupDepth( builder ) {
const { renderer, camera } = builder;
// Depth
let depthNode = this.depthNode;
if ( depthNode === null ) {
const mrt = renderer.getMRT();
if ( mrt && mrt.has( 'depth' ) ) {
depthNode = mrt.get( 'depth' );
} else if ( renderer.logarithmicDepthBuffer === true ) {
if ( camera.isPerspectiveCamera ) {
depthNode = viewZToLogarithmicDepth( positionView.z, cameraNear, cameraFar );
} else {
depthNode = viewZToOrthographicDepth( positionView.z, cameraNear, cameraFar );
}
}
}
if ( depthNode !== null ) {
depth.assign( depthNode ).append();
}
}
/**
* Setups the position node in view space. This method exists
* so derived node materials can modify the implementation e.g. sprite materials.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec3>} The position in view space.
*/
setupPositionView( /*builder*/ ) {
return modelViewMatrix.mul( positionLocal ).xyz;
}
/**
* Setups the position in clip space.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec4>} The position in view space.
*/
setupModelViewProjection( /*builder*/ ) {
return cameraProjectionMatrix.mul( positionView );
}
/**
* Setups the logic for the vertex stage.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec4>} The position in clip space.
*/
setupVertex( builder ) {
builder.addStack();
this.setupPosition( builder );
builder.context.vertex = builder.removeStack();
return modelViewProjection;
}
/**
* Setups the computation of the position in local space.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec3>} The position in local space.
*/
setupPosition( builder ) {
const { object, geometry } = builder;
if ( geometry.morphAttributes.position || geometry.morphAttributes.normal || geometry.morphAttributes.color ) {
morphReference( object ).append();
}
if ( object.isSkinnedMesh === true ) {
skinningReference( object ).append();
}
if ( this.displacementMap ) {
const displacementMap = materialReference( 'displacementMap', 'texture' );
const displacementScale = materialReference( 'displacementScale', 'float' );
const displacementBias = materialReference( 'displacementBias', 'float' );
positionLocal.addAssign( normalLocal.normalize().mul( ( displacementMap.x.mul( displacementScale ).add( displacementBias ) ) ) );
}
if ( object.isBatchedMesh ) {
batch( object ).append();
}
if ( ( object.isInstancedMesh && object.instanceMatrix && object.instanceMatrix.isInstancedBufferAttribute === true ) ) {
instancedMesh( object ).append();
}
if ( this.positionNode !== null ) {
positionLocal.assign( this.positionNode.context( { isPositionNodeInput: true } ) );
}
return positionLocal;
}
/**
* Setups the computation of the material's diffuse color.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {BufferGeometry} geometry - The geometry.
*/
setupDiffuseColor( { object, geometry } ) {
let colorNode = this.colorNode ? vec4( this.colorNode ) : materialColor;
// VERTEX COLORS
if ( this.vertexColors === true && geometry.hasAttribute( 'color' ) ) {
colorNode = vec4( colorNode.xyz.mul( attribute( 'color', 'vec3' ) ), colorNode.a );
}
// Instanced colors
if ( object.instanceColor ) {
const instanceColor = varyingProperty( 'vec3', 'vInstanceColor' );
colorNode = instanceColor.mul( colorNode );
}
if ( object.isBatchedMesh && object._colorsTexture ) {
const batchColor = varyingProperty( 'vec3', 'vBatchColor' );
colorNode = batchColor.mul( colorNode );
}
// COLOR
diffuseColor.assign( colorNode );
// OPACITY
const opacityNode = this.opacityNode ? float( this.opacityNode ) : materialOpacity;
diffuseColor.a.assign( diffuseColor.a.mul( opacityNode ) );
// ALPHA TEST
if ( this.alphaTestNode !== null || this.alphaTest > 0 ) {
const alphaTestNode = this.alphaTestNode !== null ? float( this.alphaTestNode ) : materialAlphaTest;
diffuseColor.a.lessThanEqual( alphaTestNode ).discard();
}
// ALPHA HASH
if ( this.alphaHash === true ) {
diffuseColor.a.lessThan( getAlphaHashThreshold( positionLocal ) ).discard();
}
if ( this.transparent === false && this.blending === NormalBlending && this.alphaToCoverage === false ) {
diffuseColor.a.assign( 1.0 );
}
}
/**
* Abstract interface method that can be implemented by derived materials
* to setup material-specific node variables.
*
* @abstract
* @param {NodeBuilder} builder - The current node builder.
*/
setupVariants( /*builder*/ ) {
// Interface function.
}
/**
* Setups the outgoing light node variable
*
* @return {Node<vec3>} The outgoing light node.
*/
setupOutgoingLight() {
return ( this.lights === true ) ? vec3( 0 ) : diffuseColor.rgb;
}
/**
* Setups the normal node from the material.
*
* @return {Node<vec3>} The normal node.
*/
setupNormal() {
return this.normalNode ? vec3( this.normalNode ) : materialNormal;
}
/**
* Setups the environment node from the material.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec4>} The environment node.
*/
setupEnvironment( /*builder*/ ) {
let node = null;
if ( this.envNode ) {
node = this.envNode;
} else if ( this.envMap ) {
node = this.envMap.isCubeTexture ? materialReference( 'envMap', 'cubeTexture' ) : materialReference( 'envMap', 'texture' );
}
return node;
}
/**
* Setups the light map node from the material.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec3>} The light map node.
*/
setupLightMap( builder ) {
let node = null;
if ( builder.material.lightMap ) {
node = new IrradianceNode( materialLightMap );
}
return node;
}
/**
* Setups the lights node based on the scene, environment and material.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {LightsNode} The lights node.
*/
setupLights( builder ) {
const materialLightsNode = [];
//
const envNode = this.setupEnvironment( builder );
if ( envNode && envNode.isLightingNode ) {
materialLightsNode.push( envNode );
}
const lightMapNode = this.setupLightMap( builder );
if ( lightMapNode && lightMapNode.isLightingNode ) {
materialLightsNode.push( lightMapNode );
}
if ( this.aoNode !== null || builder.material.aoMap ) {
const aoNode = this.aoNode !== null ? this.aoNode : materialAO;
materialLightsNode.push( new AONode( aoNode ) );
}
let lightsN = this.lightsNode || builder.lightsNode;
if ( materialLightsNode.length > 0 ) {
lightsN = builder.renderer.lighting.createNode( [ ...lightsN.getLights(), ...materialLightsNode ] );
}
return lightsN;
}
/**
* This method should be implemented by most derived materials
* since it defines the material's lighting model.
*
* @abstract
* @param {NodeBuilder} builder - The current node builder.
* @return {LightingModel} The lighting model.
*/
setupLightingModel( /*builder*/ ) {
// Interface function.
}
/**
* Setups the outgoing light node.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec3>} The outgoing light node.
*/
setupLighting( builder ) {
const { material } = builder;
const { backdropNode, backdropAlphaNode, emissiveNode } = this;
// OUTGOING LIGHT
const lights = this.lights === true || this.lightsNode !== null;
const lightsNode = lights ? this.setupLights( builder ) : null;
let outgoingLightNode = this.setupOutgoingLight( builder );
if ( lightsNode && lightsNode.getScope().hasLights ) {
const lightingModel = this.setupLightingModel( builder );
outgoingLightNode = lightingContext( lightsNode, lightingModel, backdropNode, backdropAlphaNode );
} else if ( backdropNode !== null ) {
outgoingLightNode = vec3( backdropAlphaNode !== null ? mix( outgoingLightNode, backdropNode, backdropAlphaNode ) : backdropNode );
}
// EMISSIVE
if ( ( emissiveNode && emissiveNode.isNode === true ) || ( material.emissive && material.emissive.isColor === true ) ) {
emissive.assign( vec3( emissiveNode ? emissiveNode : materialEmissive ) );
outgoingLightNode = outgoingLightNode.add( emissive );
}
return outgoingLightNode;
}
/**
* Setups the output node.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {Node<vec4>} outputNode - The existing output node.
* @return {Node<vec4>} The output node.
*/
setupOutput( builder, outputNode ) {
// FOG
if ( this.fog === true ) {
const fogNode = builder.fogNode;
if ( fogNode ) {
output.assign( outputNode );
outputNode = vec4( fogNode );
}
}
return outputNode;
}
/**
* Most classic material types have a node pendant e.g. for `MeshBasicMaterial`
* there is `MeshBasicNodeMaterial`. This utility method is intended for
* defining all material properties of the classic type in the node type.
*
* @param {Material} material - The material to copy properties with their values to this node material.
*/
setDefaultValues( material ) {
// This approach is to reuse the native refreshUniforms*
// and turn available the use of features like transmission and environment in core
for ( const property in material ) {
const value = material[ property ];
if ( this[ property ] === undefined ) {
this[ property ] = value;
if ( value && value.clone ) this[ property ] = value.clone();
}
}
const descriptors = Object.getOwnPropertyDescriptors( material.constructor.prototype );
for ( const key in descriptors ) {
if ( Object.getOwnPropertyDescriptor( this.constructor.prototype, key ) === undefined &&
descriptors[ key ].get !== undefined ) {
Object.defineProperty( this.constructor.prototype, key, descriptors[ key ] );
}
}
}
/**
* Serializes this material to JSON.
*
* @param {(Object|String)?} meta - The meta information for serialization.
* @return {Object} The serialized node.
*/
toJSON( meta ) {
const isRoot = ( meta === undefined || typeof meta === 'string' );
if ( isRoot ) {
meta = {
textures: {},
images: {},
nodes: {}
};
}
const data = Material.prototype.toJSON.call( this, meta );
const nodeChildren = getNodeChildren( this );
data.inputNodes = {};
for ( const { property, childNode } of nodeChildren ) {
data.inputNodes[ property ] = childNode.toJSON( meta ).uuid;
}
// TODO: Copied from Object3D.toJSON
function extractFromCache( cache ) {
const values = [];
for ( const key in cache ) {
const data = cache[ key ];
delete data.metadata;
values.push( data );
}
return values;
}
if ( isRoot ) {
const textures = extractFromCache( meta.textures );
const images = extractFromCache( meta.images );
const nodes = extractFromCache( meta.nodes );
if ( textures.length > 0 ) data.textures = textures;
if ( images.length > 0 ) data.images = images;
if ( nodes.length > 0 ) data.nodes = nodes;
}
return data;
}
/**
* Copies the properties of the given node material to this instance.
*
* @param {NodeMaterial} source - The material to copy.
* @return {NodeMaterial} A reference to this node material.
*/
copy( source ) {
this.lightsNode = source.lightsNode;
this.envNode = source.envNode;
this.colorNode = source.colorNode;
this.normalNode = source.normalNode;
this.opacityNode = source.opacityNode;
this.backdropNode = source.backdropNode;
this.backdropAlphaNode = source.backdropAlphaNode;
this.alphaTestNode = source.alphaTestNode;
this.positionNode = source.positionNode;
this.geometryNode = source.geometryNode;
this.depthNode = source.depthNode;
this.shadowPositionNode = source.shadowPositionNode;
this.receivedShadowNode = source.receivedShadowNode;
this.castShadowNode = source.castShadowNode;
this.outputNode = source.outputNode;
this.mrtNode = source.mrtNode;
this.fragmentNode = source.fragmentNode;
this.vertexNode = source.vertexNode;
return super.copy( source );
}
}
// Shared defaults template; copied into each instance via setDefaultValues().
const _defaultValues$d = /*@__PURE__*/ new LineBasicMaterial();
/**
* Node material version of `LineBasicMaterial`.
*
* @augments NodeMaterial
*/
class LineBasicNodeMaterial extends NodeMaterial {
static get type() {
return 'LineBasicNodeMaterial';
}
/**
* Constructs a new line basic node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isLineBasicNodeMaterial = true;
// Copy the classic material's defaults first, then apply user parameters on top.
this.setDefaultValues( _defaultValues$d );
this.setValues( parameters );
}
}
// Shared defaults template; copied into each instance via setDefaultValues().
const _defaultValues$c = /*@__PURE__*/ new LineDashedMaterial();
/**
* Node material version of `LineDashedMaterial`.
*
* @augments NodeMaterial
*/
class LineDashedNodeMaterial extends NodeMaterial {
static get type() {
return 'LineDashedNodeMaterial';
}
/**
* Constructs a new line dashed node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isLineDashedNodeMaterial = true;
this.setDefaultValues( _defaultValues$c );
/**
* The dash offset.
*
* @type {Number}
* @default 0
*/
this.dashOffset = 0;
/**
* The offset of dash materials is by default inferred from the `dashOffset`
* property. This node property allows to overwrite the default
* and define the offset with a node instead.
*
* If you don't want to overwrite the offset but modify the existing
* value instead, use {@link module:MaterialNode.materialLineDashOffset}.
*
* @type {Node<float>?}
* @default null
*/
this.offsetNode = null;
/**
* The scale of dash materials is by default inferred from the `scale`
* property. This node property allows to overwrite the default
* and define the scale with a node instead.
*
* If you don't want to overwrite the scale but modify the existing
* value instead, use {@link module:MaterialNode.materialLineScale}.
*
* @type {Node<float>?}
* @default null
*/
this.dashScaleNode = null;
/**
* The dash size of dash materials is by default inferred from the `dashSize`
* property. This node property allows to overwrite the default
* and define the dash size with a node instead.
*
* If you don't want to overwrite the dash size but modify the existing
* value instead, use {@link module:MaterialNode.materialLineDashSize}.
*
* @type {Node<float>?}
* @default null
*/
this.dashSizeNode = null;
/**
* The gap size of dash materials is by default inferred from the `gapSize`
* property. This node property allows to overwrite the default
* and define the gap size with a node instead.
*
* If you don't want to overwrite the gap size but modify the existing
* value instead, use {@link module:MaterialNode.materialLineGapSize}.
*
* @type {Node<float>?}
* @default null
*/
this.gapSizeNode = null;
this.setValues( parameters );
}
/**
* Setups the dash specific node variables.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setupVariants( /* builder */ ) {
// Resolve node overrides, falling back to the material's scalar properties.
const offsetNode = this.offsetNode ? float( this.offsetNode ) : materialLineDashOffset;
const dashScaleNode = this.dashScaleNode ? float( this.dashScaleNode ) : materialLineScale;
const dashSizeNode = this.dashSizeNode ? float( this.dashSizeNode ) : materialLineDashSize;
const gapSizeNode = this.gapSizeNode ? float( this.gapSizeNode ) : materialLineGapSize;
dashSize.assign( dashSizeNode );
gapSize.assign( gapSizeNode );
// Scaled, offset distance along the line; drives the dash pattern.
const vLineDistance = varying( attribute( 'lineDistance' ).mul( dashScaleNode ) );
const vLineDistanceOffset = offsetNode ? vLineDistance.add( offsetNode ) : vLineDistance;
// Discard fragments that fall into the gap portion of the dash cycle.
vLineDistanceOffset.mod( dashSize.add( gapSize ) ).greaterThan( dashSize ).discard();
}
}
/** @module ViewportSharedTextureNode **/
// Module-level singleton framebuffer texture shared by all node instances.
let _sharedFramebuffer = null;
/**
* `ViewportTextureNode` creates an internal texture for each node instance. This module
* shares a texture across all instances of `ViewportSharedTextureNode`. It should
* be the first choice when using data of the default/screen framebuffer for performance reasons.
*
* @augments module:ViewportTextureNode~ViewportTextureNode
*/
class ViewportSharedTextureNode extends ViewportTextureNode {
static get type() {
return 'ViewportSharedTextureNode';
}
/**
* Constructs a new viewport shared texture node.
*
* @param {Node} [uvNode=screenUV] - The uv node.
* @param {Node?} [levelNode=null] - The level node.
*/
constructor( uvNode = screenUV, levelNode = null ) {
// Lazily create the shared framebuffer texture on first construction.
if ( _sharedFramebuffer === null ) {
_sharedFramebuffer = new FramebufferTexture();
}
super( uvNode, levelNode, _sharedFramebuffer );
}
// NOTE(review): returning `this` instead of deferring to the base class
// presumably pins the update reference so all instances keep using the
// single shared framebuffer — confirm against ViewportTextureNode.
updateReference() {
return this;
}
}
/**
* TSL function for creating a shared viewport texture node.
*
* @function
* @param {Node} [uvNode=screenUV] - The uv node.
* @param {Node?} [levelNode=null] - The level node.
* @returns {ViewportSharedTextureNode}
*/
const viewportSharedTexture = /*@__PURE__*/ nodeProxy( ViewportSharedTextureNode );
// Defaults template for Line2NodeMaterial; dashed-line defaults are used
// because the wide-line material supports dashing as well.
const _defaultValues$b = /*@__PURE__*/ new LineDashedMaterial();
/**
* This node material can be used to render lines with a size larger than one
* by representing them as instanced meshes.
*
* @augments NodeMaterial
*/
class Line2NodeMaterial extends NodeMaterial {
static get type() {
return 'Line2NodeMaterial';
}
/**
* Constructs a new node material for wide line rendering.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters = {} ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isLine2NodeMaterial = true;
this.setDefaultValues( _defaultValues$b );
/**
* Whether vertex colors should be used or not.
* Mirrors `parameters.vertexColors` (may be `undefined` if not provided).
*
* @type {Boolean}
* @default false
*/
this.useColor = parameters.vertexColors;
/**
* The dash offset.
*
* @type {Number}
* @default 0
*/
this.dashOffset = 0;
/**
* The line width.
*
* @type {Number}
* @default 1
*/
this.lineWidth = 1;
/**
* Defines the line's color.
*
* @type {Node<vec3>?}
* @default null
*/
this.lineColorNode = null;
/**
* Defines the offset.
*
* @type {Node<float>?}
* @default null
*/
this.offsetNode = null;
/**
* Defines the dash scale.
*
* @type {Node<float>?}
* @default null
*/
this.dashScaleNode = null;
/**
* Defines the dash size.
*
* @type {Node<float>?}
* @default null
*/
this.dashSizeNode = null;
/**
* Defines the gap size.
*
* @type {Node<float>?}
* @default null
*/
this.gapSizeNode = null;
/**
* Blending is set to `NoBlending` since transparency
* is not supported, yet.
*
* @type {Number}
* @default 0
*/
this.blending = NoBlending;
// Internal feature toggles evaluated in setup().
this._useDash = parameters.dashed;
this._useAlphaToCoverage = true;
this._useWorldUnits = false;
this.setValues( parameters );
}
/**
* Setups the vertex and fragment stage of this node material.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setup( builder ) {
const { renderer } = builder;
// snapshot the feature flags so the generated node graph matches the current material state
const useAlphaToCoverage = this._useAlphaToCoverage;
const useColor = this.useColor;
const useDash = this._useDash;
const useWorldUnits = this._useWorldUnits;
// shader helper: trims a segment against an estimated near plane by interpolating
// the endpoint towards the start; returns the new endpoint
const trimSegment = Fn( ( { start, end } ) => {
const a = cameraProjectionMatrix.element( 2 ).element( 2 ); // 3rd entry in 3rd column
const b = cameraProjectionMatrix.element( 3 ).element( 2 ); // 3rd entry in 4th column
const nearEstimate = b.mul( - 0.5 ).div( a );
const alpha = nearEstimate.sub( start.z ).div( end.z.sub( start.z ) );
return vec4( mix( start.xyz, end.xyz, alpha ), end.w );
} ).setLayout( {
name: 'trimSegment',
type: 'vec4',
inputs: [
{ name: 'start', type: 'vec4' },
{ name: 'end', type: 'vec4' }
]
} );
this.vertexNode = Fn( () => {
// per-instance segment endpoints (Line2 geometry stores one segment per instance)
const instanceStart = attribute( 'instanceStart' );
const instanceEnd = attribute( 'instanceEnd' );
// camera space
const start = vec4( modelViewMatrix.mul( vec4( instanceStart, 1.0 ) ) ).toVar( 'start' );
const end = vec4( modelViewMatrix.mul( vec4( instanceEnd, 1.0 ) ) ).toVar( 'end' );
if ( useDash ) {
const dashScaleNode = this.dashScaleNode ? float( this.dashScaleNode ) : materialLineScale;
const offsetNode = this.offsetNode ? float( this.offsetNode ) : materialLineDashOffset;
const instanceDistanceStart = attribute( 'instanceDistanceStart' );
const instanceDistanceEnd = attribute( 'instanceDistanceEnd' );
// positionGeometry.y < 0.5 selects the quad vertices that belong to the segment start
let lineDistance = positionGeometry.y.lessThan( 0.5 ).select( dashScaleNode.mul( instanceDistanceStart ), dashScaleNode.mul( instanceDistanceEnd ) );
lineDistance = lineDistance.add( offsetNode );
varyingProperty( 'float', 'lineDistance' ).assign( lineDistance );
}
if ( useWorldUnits ) {
varyingProperty( 'vec3', 'worldStart' ).assign( start.xyz );
varyingProperty( 'vec3', 'worldEnd' ).assign( end.xyz );
}
const aspect = viewport.z.div( viewport.w );
// special case for perspective projection, and segments that terminate either in, or behind, the camera plane
// clearly the gpu firmware has a way of addressing this issue when projecting into ndc space
// but we need to perform ndc-space calculations in the shader, so we must address this issue directly
// perhaps there is a more elegant solution -- WestLangley
const perspective = cameraProjectionMatrix.element( 2 ).element( 3 ).equal( - 1.0 ); // 4th entry in the 3rd column
If( perspective, () => {
If( start.z.lessThan( 0.0 ).and( end.z.greaterThan( 0.0 ) ), () => {
end.assign( trimSegment( { start: start, end: end } ) );
} ).ElseIf( end.z.lessThan( 0.0 ).and( start.z.greaterThanEqual( 0.0 ) ), () => {
start.assign( trimSegment( { start: end, end: start } ) );
} );
} );
// clip space
const clipStart = cameraProjectionMatrix.mul( start );
const clipEnd = cameraProjectionMatrix.mul( end );
// ndc space
const ndcStart = clipStart.xyz.div( clipStart.w );
const ndcEnd = clipEnd.xyz.div( clipEnd.w );
// direction
const dir = ndcEnd.xy.sub( ndcStart.xy ).toVar();
// account for clip-space aspect ratio
dir.x.assign( dir.x.mul( aspect ) );
dir.assign( dir.normalize() );
const clip = vec4().toVar();
if ( useWorldUnits ) {
// get the offset direction as perpendicular to the view vector
const worldDir = end.xyz.sub( start.xyz ).normalize();
const tmpFwd = mix( start.xyz, end.xyz, 0.5 ).normalize();
const worldUp = worldDir.cross( tmpFwd ).normalize();
const worldFwd = worldDir.cross( worldUp );
const worldPos = varyingProperty( 'vec4', 'worldPos' );
worldPos.assign( positionGeometry.y.lessThan( 0.5 ).select( start, end ) );
// height offset
const hw = materialLineWidth.mul( 0.5 );
worldPos.addAssign( vec4( positionGeometry.x.lessThan( 0.0 ).select( worldUp.mul( hw ), worldUp.mul( hw ).negate() ), 0 ) );
// don't extend the line if we're rendering dashes because we
// won't be rendering the endcaps
if ( ! useDash ) {
// cap extension
worldPos.addAssign( vec4( positionGeometry.y.lessThan( 0.5 ).select( worldDir.mul( hw ).negate(), worldDir.mul( hw ) ), 0 ) );
// add width to the box
worldPos.addAssign( vec4( worldFwd.mul( hw ), 0 ) );
// endcaps
If( positionGeometry.y.greaterThan( 1.0 ).or( positionGeometry.y.lessThan( 0.0 ) ), () => {
worldPos.subAssign( vec4( worldFwd.mul( 2.0 ).mul( hw ), 0 ) );
} );
}
// project the worldpos
clip.assign( cameraProjectionMatrix.mul( worldPos ) );
// shift the depth of the projected points so the line
// segments overlap neatly
const clipPose = vec3().toVar();
clipPose.assign( positionGeometry.y.lessThan( 0.5 ).select( ndcStart, ndcEnd ) );
clip.z.assign( clipPose.z.mul( clip.w ) );
} else {
// screen-space units: offset perpendicular to the segment direction
const offset = vec2( dir.y, dir.x.negate() ).toVar( 'offset' );
// undo aspect ratio adjustment
dir.x.assign( dir.x.div( aspect ) );
offset.x.assign( offset.x.div( aspect ) );
// sign flip
offset.assign( positionGeometry.x.lessThan( 0.0 ).select( offset.negate(), offset ) );
// endcaps
If( positionGeometry.y.lessThan( 0.0 ), () => {
offset.assign( offset.sub( dir ) );
} ).ElseIf( positionGeometry.y.greaterThan( 1.0 ), () => {
offset.assign( offset.add( dir ) );
} );
// adjust for linewidth
offset.assign( offset.mul( materialLineWidth ) );
// adjust for clip-space to screen-space conversion // maybe resolution should be based on viewport ...
offset.assign( offset.div( viewport.w ) );
// select end
clip.assign( positionGeometry.y.lessThan( 0.5 ).select( clipStart, clipEnd ) );
// back to clip space
offset.assign( offset.mul( clip.w ) );
clip.assign( clip.add( vec4( offset, 0, 0 ) ) );
}
return clip;
} )();
// shader helper: clamped parameters (mua, mub) of the closest points
// between segments p1-p2 and p3-p4
const closestLineToLine = Fn( ( { p1, p2, p3, p4 } ) => {
const p13 = p1.sub( p3 );
const p43 = p4.sub( p3 );
const p21 = p2.sub( p1 );
const d1343 = p13.dot( p43 );
const d4321 = p43.dot( p21 );
const d1321 = p13.dot( p21 );
const d4343 = p43.dot( p43 );
const d2121 = p21.dot( p21 );
const denom = d2121.mul( d4343 ).sub( d4321.mul( d4321 ) );
const numer = d1343.mul( d4321 ).sub( d1321.mul( d4343 ) );
const mua = numer.div( denom ).clamp();
const mub = d1343.add( d4321.mul( mua ) ).div( d4343 ).clamp();
return vec2( mua, mub );
} );
this.colorNode = Fn( () => {
const vUv = uv();
if ( useDash ) {
const dashSizeNode = this.dashSizeNode ? float( this.dashSizeNode ) : materialLineDashSize;
const gapSizeNode = this.gapSizeNode ? float( this.gapSizeNode ) : materialLineGapSize;
dashSize.assign( dashSizeNode );
gapSize.assign( gapSizeNode );
const vLineDistance = varyingProperty( 'float', 'lineDistance' );
vUv.y.lessThan( - 1.0 ).or( vUv.y.greaterThan( 1.0 ) ).discard(); // discard endcaps
vLineDistance.mod( dashSize.add( gapSize ) ).greaterThan( dashSize ).discard(); // todo - FIX
}
const alpha = float( 1 ).toVar( 'alpha' );
if ( useWorldUnits ) {
const worldStart = varyingProperty( 'vec3', 'worldStart' );
const worldEnd = varyingProperty( 'vec3', 'worldEnd' );
// Find the closest points on the view ray and the line segment
const rayEnd = varyingProperty( 'vec4', 'worldPos' ).xyz.normalize().mul( 1e5 );
const lineDir = worldEnd.sub( worldStart );
const params = closestLineToLine( { p1: worldStart, p2: worldEnd, p3: vec3( 0.0, 0.0, 0.0 ), p4: rayEnd } );
const p1 = worldStart.add( lineDir.mul( params.x ) );
const p2 = rayEnd.mul( params.y );
const delta = p1.sub( p2 );
const len = delta.length();
const norm = len.div( materialLineWidth );
if ( ! useDash ) {
if ( useAlphaToCoverage && renderer.samples > 1 ) {
// smooth the edge via alpha when multisampling is available
const dnorm = norm.fwidth();
alpha.assign( smoothstep( dnorm.negate().add( 0.5 ), dnorm.add( 0.5 ), norm ).oneMinus() );
} else {
norm.greaterThan( 0.5 ).discard();
}
}
} else {
// round endcaps
if ( useAlphaToCoverage && renderer.samples > 1 ) {
const a = vUv.x;
const b = vUv.y.greaterThan( 0.0 ).select( vUv.y.sub( 1.0 ), vUv.y.add( 1.0 ) );
const len2 = a.mul( a ).add( b.mul( b ) );
const dlen = float( len2.fwidth() ).toVar( 'dlen' );
If( vUv.y.abs().greaterThan( 1.0 ), () => {
alpha.assign( smoothstep( dlen.oneMinus(), dlen.add( 1 ), len2 ).oneMinus() );
} );
} else {
If( vUv.y.abs().greaterThan( 1.0 ), () => {
const a = vUv.x;
const b = vUv.y.greaterThan( 0.0 ).select( vUv.y.sub( 1.0 ), vUv.y.add( 1.0 ) );
const len2 = a.mul( a ).add( b.mul( b ) );
len2.greaterThan( 1.0 ).discard();
} );
}
}
// color priority: explicit node > per-instance colors > material color
let lineColorNode;
if ( this.lineColorNode ) {
lineColorNode = this.lineColorNode;
} else {
if ( useColor ) {
const instanceColorStart = attribute( 'instanceColorStart' );
const instanceColorEnd = attribute( 'instanceColorEnd' );
const instanceColor = positionGeometry.y.lessThan( 0.5 ).select( instanceColorStart, instanceColorEnd );
lineColorNode = instanceColor.mul( materialColor );
} else {
lineColorNode = materialColor;
}
}
return vec4( lineColorNode, alpha );
} )();
if ( this.transparent ) {
// blending is disabled ( see constructor ), so composite manually against the framebuffer
const opacityNode = this.opacityNode ? float( this.opacityNode ) : materialOpacity;
this.outputNode = vec4( this.colorNode.rgb.mul( opacityNode ).add( viewportSharedTexture().rgb.mul( opacityNode.oneMinus() ) ), this.colorNode.a );
}
super.setup( builder );
}
/**
* Whether the lines should sized in world units or not.
* When set to `false` the unit is pixel.
*
* @type {Boolean}
* @default false
*/
get worldUnits() {
return this._useWorldUnits;
}
set worldUnits( value ) {
if ( this._useWorldUnits !== value ) {
this._useWorldUnits = value;
this.needsUpdate = true;
}
}
/**
* Whether the lines should be dashed or not.
*
* @type {Boolean}
* @default false
*/
get dashed() {
return this._useDash;
}
set dashed( value ) {
if ( this._useDash !== value ) {
this._useDash = value;
this.needsUpdate = true;
}
}
/**
* Whether alpha to coverage should be used or not.
*
* @type {Boolean}
* @default true
*/
get alphaToCoverage() {
return this._useAlphaToCoverage;
}
set alphaToCoverage( value ) {
if ( this._useAlphaToCoverage !== value ) {
this._useAlphaToCoverage = value;
this.needsUpdate = true;
}
}
}
/** @module Packing **/
/**
* Packs a direction vector into a color value by remapping
* the [-1, 1] component range to [0, 1].
*
* @method
* @param {Node<vec3>} node - The direction to pack.
* @return {Node<vec3>} The color.
*/
const directionToColor = ( node ) => {
const direction = nodeObject( node );
return direction.mul( 0.5 ).add( 0.5 );
};
/**
* Unpacks a color value into a direction vector by remapping
* the [0, 1] component range to [-1, 1].
*
* @method
* @param {Node<vec3>} node - The color to unpack.
* @return {Node<vec3>} The direction.
*/
const colorToDirection = ( node ) => {
const color = nodeObject( node );
return color.mul( 2.0 ).sub( 1 );
};
const _defaultValues$a = /*@__PURE__*/ new MeshNormalMaterial();
/**
* Node material version of `MeshNormalMaterial`.
*
* @augments NodeMaterial
*/
class MeshNormalNodeMaterial extends NodeMaterial {
static get type() {
return 'MeshNormalNodeMaterial';
}
/**
* Constructs a new mesh normal node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isMeshNormalNodeMaterial = true;
this.setDefaultValues( _defaultValues$a );
this.setValues( parameters );
}
/**
* Overwrites the default implementation: the diffuse color is derived
* from the view-space normal packed into the [0, 1] color range.
*/
setupDiffuseColor() {
const opacity = this.opacityNode ? float( this.opacityNode ) : materialOpacity;
diffuseColor.assign( vec4( directionToColor( transformedNormalView ), opacity ) );
}
}
/** @module EquirectUVNode **/
/**
* Can be used to compute texture coordinates for projecting an
* equirectangular texture onto a mesh for using it as the scene's
* background.
*
* ```js
* scene.backgroundNode = texture( equirectTexture, equirectUV() );
* ```
*
* @augments TempNode
*/
class EquirectUVNode extends TempNode {
static get type() {
return 'EquirectUVNode';
}
/**
* Constructs a new equirect uv node.
*
* @param {Node<vec3>} [dirNode=positionWorldDirection] - The direction vector used for sampling.
*/
constructor( dirNode = positionWorldDirection ) {
super( 'vec2' );
/**
* The direction vector used for sampling, which is by default `positionWorldDirection`.
*
* @type {Node<vec3>}
*/
this.dirNode = dirNode;
}
setup() {
const direction = this.dirNode;
// longitude: angle around the y-axis, remapped from [-PI, PI] to [0, 1]
const u = direction.z.atan( direction.x ).mul( 1 / ( Math.PI * 2 ) ).add( 0.5 );
// latitude: clamped to avoid asin() domain errors, remapped from [-PI/2, PI/2] to [0, 1]
const v = direction.y.clamp( - 1.0, 1.0 ).asin().mul( 1 / Math.PI ).add( 0.5 );
return vec2( u, v );
}
}
/**
* TSL function for creating an equirect uv node.
*
* @function
* @param {Node<vec3>} [dirNode=positionWorldDirection] - A direction vector for sampling which is by default `positionWorldDirection`.
* @returns {EquirectUVNode} The equirect uv node.
*/
const equirectUV = /*@__PURE__*/ nodeProxy( EquirectUVNode );
// @TODO: Consider rename WebGLCubeRenderTarget to just CubeRenderTarget
/**
* This class represents a cube render target. It is a special version
* of `WebGLCubeRenderTarget` which is compatible with `WebGPURenderer`.
*
* @augments WebGLCubeRenderTarget
*/
class CubeRenderTarget extends WebGLCubeRenderTarget {
/**
* Constructs a new cube render target.
*
* @param {Number} [size=1] - The size of the render target.
* @param {Object} [options={}] - The configuration options.
*/
constructor( size = 1, options = {} ) {
super( size, options );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isCubeRenderTarget = true;
}
/**
* Converts the given equirectangular texture to a cube map.
*
* @param {Renderer} renderer - The renderer.
* @param {Texture} texture$1 - The equirectangular texture.
* @return {CubeRenderTarget} A reference to this cube render target.
*/
fromEquirectangularTexture( renderer, texture$1 ) {
// remember the incoming texture state so it can be restored after rendering
const currentMinFilter = texture$1.minFilter;
const currentGenerateMipmaps = texture$1.generateMipmaps;
texture$1.generateMipmaps = true;
this.texture.type = texture$1.type;
this.texture.colorSpace = texture$1.colorSpace;
this.texture.generateMipmaps = texture$1.generateMipmaps;
this.texture.minFilter = texture$1.minFilter;
this.texture.magFilter = texture$1.magFilter;
// render the equirect texture onto the inside of a box around the cube camera
const geometry = new BoxGeometry( 5, 5, 5 );
const uvNode = equirectUV( positionWorldDirection );
const material = new NodeMaterial();
material.colorNode = texture( texture$1, uvNode, 0 );
material.side = BackSide;
material.blending = NoBlending;
const mesh = new Mesh( geometry, material );
const scene = new Scene();
scene.add( mesh );
// Avoid blurred poles
if ( texture$1.minFilter === LinearMipmapLinearFilter ) texture$1.minFilter = LinearFilter;
const camera = new CubeCamera( 1, 10, this );
// temporarily disable MRT so the cube faces receive plain color output
const currentMRT = renderer.getMRT();
renderer.setMRT( null );
camera.update( renderer, scene );
renderer.setMRT( currentMRT );
// restore the original state of the equirectangular texture
texture$1.minFilter = currentMinFilter;
texture$1.generateMipmaps = currentGenerateMipmaps; // FIX: previously assigned to a non-existent `currentGenerateMipmaps` property, leaking `generateMipmaps = true`
mesh.geometry.dispose();
mesh.material.dispose();
return this;
}
}
/** @module CubeMapNode **/
const _cache$1 = new WeakMap();
/**
* This node can be used to automatically convert environment maps in the
* equirectangular format into the cube map format.
*
* @augments TempNode
*/
class CubeMapNode extends TempNode {
static get type() {
return 'CubeMapNode';
}
/**
* Constructs a new cube map node.
*
* @param {Node} envNode - The node representing the environment map.
*/
constructor( envNode ) {
super( 'vec3' );
/**
* The node representing the environment map.
*
* @type {Node}
*/
this.envNode = envNode;
/**
* A reference to the internal cube texture.
*
* @private
* @type {CubeTexture}
* @default null
*/
this._cubeTexture = null;
/**
* A reference to the internal cube texture node.
*
* @private
* @type {CubeTextureNode}
*/
this._cubeTextureNode = cubeTexture();
const defaultTexture = new CubeTexture();
defaultTexture.isRenderTargetTexture = true;
/**
* A default cube texture that acts as a placeholder.
* It is used when the conversion from equirectangular to cube
* map has not finished yet for a given texture.
*
* @private
* @type {CubeTexture}
*/
this._defaultTexture = defaultTexture;
/**
* The `updateBeforeType` is set to `NodeUpdateType.RENDER` since the node updates
* the texture once per render in its {@link CubeMapNode#updateBefore} method.
*
* @type {String}
* @default 'render'
*/
this.updateBeforeType = NodeUpdateType.RENDER;
}
/**
* If the environment refers to an equirectangular texture, converts it
* into a cube map (cached per texture) and assigns the result to the
* internal cube texture node. Non-equirectangular environments are
* passed through unchanged.
*
* @param {NodeFrame} frame - The current frame (provides renderer and material).
*/
updateBefore( frame ) {
const { renderer, material } = frame;
const envNode = this.envNode;
if ( envNode.isTextureNode || envNode.isMaterialReferenceNode ) {
const texture = ( envNode.isTextureNode ) ? envNode.value : material[ envNode.property ];
if ( texture && texture.isTexture ) {
const mapping = texture.mapping;
if ( mapping === EquirectangularReflectionMapping || mapping === EquirectangularRefractionMapping ) {
// check for converted cubemap map
if ( _cache$1.has( texture ) ) {
const cubeMap = _cache$1.get( texture );
mapTextureMapping( cubeMap, texture.mapping );
this._cubeTexture = cubeMap;
} else {
// create cube map from equirectangular map
const image = texture.image;
if ( isEquirectangularMapReady$1( image ) ) {
const renderTarget = new CubeRenderTarget( image.height );
renderTarget.fromEquirectangularTexture( renderer, texture );
mapTextureMapping( renderTarget.texture, texture.mapping );
this._cubeTexture = renderTarget.texture;
_cache$1.set( texture, renderTarget.texture );
// the dispose listener clears the cache entry and frees the render target
texture.addEventListener( 'dispose', onTextureDispose );
} else {
// default cube texture as fallback when equirectangular texture is not yet loaded
this._cubeTexture = this._defaultTexture;
}
}
//
this._cubeTextureNode.value = this._cubeTexture;
} else {
// envNode already refers to a cube map
this._cubeTextureNode = this.envNode;
}
}
}
}
/**
* Returns the internal cube texture node, updating it first so the
* initial build sees a valid texture.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node} The cube texture node.
*/
setup( builder ) {
this.updateBefore( builder );
return this._cubeTextureNode;
}
}
/**
* Returns true if the given equirectangular image has been fully loaded
* and is ready for further processing.
*
* @private
* @param {Image} image - The equirectangular image to check.
* @return {Boolean} Whether the image is ready or not.
*/
function isEquirectangularMapReady$1( image ) {
// `!= null` rules out both `null` and `undefined`
return image != null && image.height > 0;
}
/**
* This function is executed when `dispose()` is called on the equirectangular
* texture. In this case, the generated cube map with its render target
* is deleted as well.
*
* @private
* @param {Object} event - The event object.
*/
function onTextureDispose( event ) {
const equirectTexture = event.target;
equirectTexture.removeEventListener( 'dispose', onTextureDispose );
// free the cached cube map generated for this texture, if any
const cachedCubeTexture = _cache$1.get( equirectTexture );
if ( cachedCubeTexture === undefined ) return;
_cache$1.delete( equirectTexture );
cachedCubeTexture.dispose();
}
/**
* This function makes sure the generated cube map uses the correct
* texture mapping that corresponds to the equirectangular original.
*
* @private
* @param {Texture} texture - The cube texture.
* @param {Number} mapping - The original texture mapping.
*/
function mapTextureMapping( texture, mapping ) {
switch ( mapping ) {
case EquirectangularReflectionMapping:
texture.mapping = CubeReflectionMapping;
break;
case EquirectangularRefractionMapping:
texture.mapping = CubeRefractionMapping;
break;
}
}
/**
* TSL function for creating a cube map node.
*
* @function
* @param {Node} envNode - The node representing the environment map.
* @returns {CubeMapNode} The cube map node.
*/
const cubeMapNode = /*@__PURE__*/ nodeProxy( CubeMapNode );
/**
* Represents a basic model for Image-based lighting (IBL). The environment
* is defined via environment maps in the equirectangular or cube map format.
* `BasicEnvironmentNode` is intended for non-PBR materials like {@link MeshBasicNodeMaterial}
* or {@link MeshPhongNodeMaterial}.
*
* @augments LightingNode
*/
class BasicEnvironmentNode extends LightingNode {
static get type() {
return 'BasicEnvironmentNode';
}
/**
* Constructs a new basic environment node.
*
* @param {Node} [envNode=null] - A node representing the environment.
*/
constructor( envNode = null ) {
super();
/**
* A node representing the environment.
*
* @type {Node}
* @default null
*/
this.envNode = envNode;
}
setup( builder ) {
// consumed by BasicLightingModel in its finish() method
builder.context.environment = cubeMapNode( this.envNode );
}
}
/**
* A specific version of {@link IrradianceNode} that is only relevant
* for {@link MeshBasicNodeMaterial}. Since the material is unlit, it
* requires a special scaling factor for the light map.
*
* @augments LightingNode
*/
class BasicLightMapNode extends LightingNode {
static get type() {
return 'BasicLightMapNode';
}
/**
* Constructs a new basic light map node.
*
* @param {Node<vec3>?} [lightMapNode=null] - The light map node.
*/
constructor( lightMapNode = null ) {
super();
/**
* The light map node.
*
* @type {Node<vec3>?}
*/
this.lightMapNode = lightMapNode;
}
setup( builder ) {
// consumed by BasicLightingModel in its indirectDiffuse() evaluation;
// the light map is scaled by 1 / PI for unlit materials
const RECIPROCAL_PI = float( 1 / Math.PI );
builder.context.irradianceLightMap = this.lightMapNode.mul( RECIPROCAL_PI );
}
}
/**
* Abstract class for implementing lighting models. The module defines
* multiple methods that concrete lighting models can implement. These
* methods are executed at different points during the light evaluation
* process.
*/
class LightingModel {
// Subclasses override any subset of these hooks; the base implementations
// are intentional no-ops so partial implementations remain valid.
/**
* This method is intended for setting up lighting model and context data
* which are later used in the evaluation process.
*
* @abstract
* @param {ContextNode} input - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
start( /*input, stack, builder*/ ) { }
/**
* This method is intended for executing final tasks like final updates
* to the outgoing light.
*
* @abstract
* @param {ContextNode} input - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
finish( /*input, stack, builder*/ ) { }
/**
* This method is intended for implementing the direct light term and
* executed during the build process of directional, point and spot light nodes.
*
* @abstract
* @param {Object} input - The input data.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
direct( /*input, stack, builder*/ ) { }
/**
* This method is intended for implementing the direct light term for
* rect area light nodes.
*
* @abstract
* @param {Object} input - The input data.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
directRectArea( /*input, stack, builder*/ ) {}
/**
* This method is intended for implementing the indirect light term.
*
* @abstract
* @param {ContextNode} input - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
indirect( /*input, stack, builder*/ ) { }
/**
* This method is intended for implementing the ambient occlusion term.
* Unlike other methods, this method must be called manually by the lighting
* model in its indirect term.
*
* @abstract
* @param {ContextNode} input - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
ambientOcclusion( /*input, stack, builder*/ ) { }
}
/**
* Represents the lighting model for unlit materials. The only light contribution
* is baked indirect lighting modulated with ambient occlusion and the material's
* diffuse color. Environment mapping is supported. Used in {@link MeshBasicNodeMaterial}.
*
* @augments LightingModel
*/
class BasicLightingModel extends LightingModel {
/**
* Constructs a new basic lighting model.
*/
constructor() {
super();
}
/**
* Implements the baked indirect lighting with its modulation.
*
* @param {ContextNode} context - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
indirect( context, stack, builder ) {
const { ambientOcclusion, reflectedLight } = context;
const irradianceLightMap = builder.context.irradianceLightMap;
reflectedLight.indirectDiffuse.assign( vec4( 0.0 ) );
// accumulation (baked indirect lighting only); white when no light map is present
reflectedLight.indirectDiffuse.addAssign( irradianceLightMap ? irradianceLightMap : vec4( 1.0, 1.0, 1.0, 0.0 ) );
// modulation
reflectedLight.indirectDiffuse.mulAssign( ambientOcclusion );
reflectedLight.indirectDiffuse.mulAssign( diffuseColor.rgb );
}
/**
* Implements the environment mapping.
*
* @param {ContextNode} context - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
finish( context, stack, builder ) {
const material = builder.material;
const outgoingLight = context.outgoingLight;
const envNode = builder.context.environment;
if ( ! envNode ) return;
const combine = material.combine;
if ( combine === MultiplyOperation ) {
outgoingLight.rgb.assign( mix( outgoingLight.rgb, outgoingLight.rgb.mul( envNode.rgb ), materialSpecularStrength.mul( materialReflectivity ) ) );
} else if ( combine === MixOperation ) {
outgoingLight.rgb.assign( mix( outgoingLight.rgb, envNode.rgb, materialSpecularStrength.mul( materialReflectivity ) ) );
} else if ( combine === AddOperation ) {
outgoingLight.rgb.addAssign( envNode.rgb.mul( materialSpecularStrength.mul( materialReflectivity ) ) );
} else {
console.warn( 'THREE.BasicLightingModel: Unsupported .combine value:', material.combine );
}
}
}
const _defaultValues$9 = /*@__PURE__*/ new MeshBasicMaterial();
/**
* Node material version of `MeshBasicMaterial`.
*
* @augments NodeMaterial
*/
class MeshBasicNodeMaterial extends NodeMaterial {
static get type() {
return 'MeshBasicNodeMaterial';
}
/**
* Constructs a new mesh basic node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isMeshBasicNodeMaterial = true;
/**
* Although the basic material is by definition unlit, we set
* this property to `true` since we use a lighting model to compute
* the outgoing light of the fragment shader.
*
* @type {Boolean}
* @default true
*/
this.lights = true;
this.setDefaultValues( _defaultValues$9 );
this.setValues( parameters );
}
/**
* Basic materials are not affected by normal and bump maps so we
* return by default {@link module:Normal.normalView}.
*
* @return {Node<vec3>} The normal node.
*/
setupNormal() {
return normalView; // see #28839
}
/**
* Overwritten since this type of material uses {@link BasicEnvironmentNode}
* to implement the default environment mapping.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {BasicEnvironmentNode<vec3>?} The environment node.
*/
setupEnvironment( builder ) {
const envNode = super.setupEnvironment( builder );
if ( envNode ) return new BasicEnvironmentNode( envNode );
return null;
}
/**
* This method must be overwritten since light maps are evaluated
* with a special scaling factor for basic materials.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {BasicLightMapNode<vec3>?} The light map node.
*/
setupLightMap( builder ) {
return builder.material.lightMap ? new BasicLightMapNode( materialLightMap ) : null;
}
/**
* The material overwrites this method because `lights` is set to `true` but
* we still want to return the diffuse color as the outgoing light.
*
* @return {Node<vec3>} The outgoing light node.
*/
setupOutgoingLight() {
return diffuseColor.rgb;
}
/**
* Setups the lighting model.
*
* @return {BasicLightingModel} The lighting model.
*/
setupLightingModel() {
return new BasicLightingModel();
}
}
// Fresnel term via Schlick '94, using the exp2-based optimization
// presented by Epic at SIGGRAPH '13:
// https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf
const F_Schlick = /*@__PURE__*/ Fn( ( { f0, f90, dotVH } ) => {
const exponent = dotVH.mul( - 5.55473 ).sub( 6.98316 ).mul( dotVH );
const fresnel = exponent.exp2();
return f0.mul( fresnel.oneMinus() ).add( f90.mul( fresnel ) );
} ); // validated
// Lambertian diffuse BRDF: diffuse color scaled by 1 / PI
const BRDF_Lambert = /*@__PURE__*/ Fn( ( { diffuseColor } ) => {
return diffuseColor.mul( 1 / Math.PI ); // punctual light
} ); // validated
const G_BlinnPhong_Implicit = () => float( 0.25 );
// normalized Blinn-Phong distribution term
const D_BlinnPhong = /*@__PURE__*/ Fn( ( { dotNH } ) => {
const normalization = shininess.mul( float( 0.5 ) ).add( 1.0 ).mul( float( 1 / Math.PI ) );
return normalization.mul( dotNH.pow( shininess ) );
} );
// Blinn-Phong specular BRDF assembled from Fresnel, geometry and distribution terms
const BRDF_BlinnPhong = /*@__PURE__*/ Fn( ( { lightDirection } ) => {
const halfDir = lightDirection.add( positionViewDirection ).normalize();
const dotNH = transformedNormalView.dot( halfDir ).clamp();
const dotVH = positionViewDirection.dot( halfDir ).clamp();
const fresnel = F_Schlick( { f0: specularColor, f90: 1.0, dotVH } );
const geometryTerm = G_BlinnPhong_Implicit();
const distribution = D_BlinnPhong( { dotNH } );
return fresnel.mul( geometryTerm ).mul( distribution );
} );
/**
* Represents the lighting model for a phong material. Used in {@link MeshPhongNodeMaterial}.
*
* @augments BasicLightingModel
*/
class PhongLightingModel extends BasicLightingModel {
/**
* Constructs a new phong lighting model.
*
* @param {Boolean} [specular=true] - Whether specular is supported or not.
*/
constructor( specular = true ) {
super();
/**
* Whether specular highlights are evaluated. Set this to `false` for a
* Lambert-like material meaning a material for non-shiny surfaces.
*
* @type {Boolean}
* @default true
*/
this.specular = specular;
}
/**
* Implements the direct lighting. The specular portion is optional and can
* be controlled with the {@link PhongLightingModel#specular} flag.
*
* @param {Object} input - The input data.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
direct( { lightDirection, lightColor, reflectedLight } ) {
const dotNL = transformedNormalView.dot( lightDirection ).clamp();
const irradiance = dotNL.mul( lightColor );
const diffuseTerm = irradiance.mul( BRDF_Lambert( { diffuseColor: diffuseColor.rgb } ) );
reflectedLight.directDiffuse.addAssign( diffuseTerm );
if ( this.specular !== true ) return;
const specularTerm = irradiance.mul( BRDF_BlinnPhong( { lightDirection } ) ).mul( materialSpecularStrength );
reflectedLight.directSpecular.addAssign( specularTerm );
}
/**
* Implements the indirect lighting.
*
* @param {ContextNode} input - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
indirect( { ambientOcclusion, irradiance, reflectedLight } ) {
const indirectTerm = irradiance.mul( BRDF_Lambert( { diffuseColor } ) );
reflectedLight.indirectDiffuse.addAssign( indirectTerm );
reflectedLight.indirectDiffuse.mulAssign( ambientOcclusion );
}
}
const _defaultValues$8 = /*@__PURE__*/ new MeshLambertMaterial();
/**
* Node material version of `MeshLambertMaterial`.
*
* @augments NodeMaterial
*/
class MeshLambertNodeMaterial extends NodeMaterial {
static get type() {
return 'MeshLambertNodeMaterial';
}
/**
* Constructs a new mesh lambert node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isMeshLambertNodeMaterial = true;
/**
* Set to `true` because lambert materials react on lights.
*
* @type {Boolean}
* @default true
*/
this.lights = true;
this.setDefaultValues( _defaultValues$8 );
this.setValues( parameters );
}
/**
* Overwritten since this type of material uses {@link BasicEnvironmentNode}
* to implement the default environment mapping.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {BasicEnvironmentNode<vec3>?} The environment node.
*/
setupEnvironment( builder ) {
const envNode = super.setupEnvironment( builder );
if ( envNode ) return new BasicEnvironmentNode( envNode );
return null;
}
/**
* Setups the lighting model.
*
* @return {PhongLightingModel} The lighting model.
*/
setupLightingModel( /*builder*/ ) {
// disabling specular turns the phong model into a lambert-like one
return new PhongLightingModel( false );
}
}
const _defaultValues$7 = /*@__PURE__*/ new MeshPhongMaterial();
/**
* Node material version of `MeshPhongMaterial`.
*
* @augments NodeMaterial
*/
class MeshPhongNodeMaterial extends NodeMaterial {
static get type() {
return 'MeshPhongNodeMaterial';
}
/**
* Constructs a new mesh phong node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isMeshPhongNodeMaterial = true;
/**
* Set to `true` because phong materials react on lights.
*
* @type {Boolean}
* @default true
*/
this.lights = true;
/**
* Overrides the material's `shininess` property with a node, if set.
*
* If you don't want to overwrite the shininess but modify the existing
* value instead, use {@link module:MaterialNode.materialShininess}.
*
* @type {Node<float>?}
* @default null
*/
this.shininessNode = null;
/**
* Overrides the material's `specular` color with a node, if set.
*
* If you don't want to overwrite the specular color but modify the existing
* value instead, use {@link module:MaterialNode.materialSpecular}.
*
* @type {Node<vec3>?}
* @default null
*/
this.specularNode = null;
this.setDefaultValues( _defaultValues$7 );
this.setValues( parameters );
}
/**
* Overwritten since this type of material uses {@link BasicEnvironmentNode}
* to implement the default environment mapping.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {BasicEnvironmentNode<vec3>?} The environment node.
*/
setupEnvironment( builder ) {
const envNode = super.setupEnvironment( builder );
if ( envNode ) return new BasicEnvironmentNode( envNode );
return null;
}
/**
* Setups the lighting model.
*
* @return {PhongLightingModel} The lighting model.
*/
setupLightingModel( /*builder*/ ) {
return new PhongLightingModel();
}
/**
* Setups the phong specific node variables.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setupVariants( /*builder*/ ) {
// SHININESS — clamped to prevent pow( 0.0, 0.0 )
const shininessValue = this.shininessNode ? float( this.shininessNode ) : materialShininess;
shininess.assign( shininessValue.max( 1e-4 ) );
// SPECULAR COLOR
specularColor.assign( this.specularNode || materialSpecular );
}
copy( source ) {
this.shininessNode = source.shininessNode;
this.specularNode = source.specularNode;
return super.copy( source );
}
}
// Derives a roughness contribution from the screen-space variation of the view normal
// (specular anti-aliasing). Returns 0 when the geometry has no normal attribute,
// since derivatives of a missing attribute would be meaningless.
const getGeometryRoughness = /*@__PURE__*/ Fn( ( builder ) => {
if ( builder.geometry.hasAttribute( 'normal' ) === false ) {
return float( 0 );
}
const dxy = normalView.dFdx().abs().max( normalView.dFdy().abs() );
const geometryRoughness = dxy.x.max( dxy.y ).max( dxy.z );
return geometryRoughness;
} );
// Clamps the material roughness to a minimum and widens it by the geometry
// roughness, then caps the result at 1.0.
const getRoughness = /*@__PURE__*/ Fn( ( inputs ) => {
const { roughness } = inputs;
const geometryRoughness = getGeometryRoughness();
let roughnessFactor = roughness.max( 0.0525 ); // 0.0525 corresponds to the base mip of a 256 cubemap.
roughnessFactor = roughnessFactor.add( geometryRoughness );
roughnessFactor = roughnessFactor.min( 1.0 );
return roughnessFactor;
} );
// Height-correlated Smith visibility term for the GGX microfacet model.
// Moving Frostbite to Physically Based Rendering 3.0 - page 12, listing 2
// https://seblagarde.files.wordpress.com/2015/07/course_notes_moving_frostbite_to_pbr_v32.pdf
const V_GGX_SmithCorrelated = /*@__PURE__*/ Fn( ( { alpha, dotNL, dotNV } ) => {
const a2 = alpha.pow2();
const gv = dotNL.mul( a2.add( a2.oneMinus().mul( dotNV.pow2() ) ).sqrt() );
const gl = dotNV.mul( a2.add( a2.oneMinus().mul( dotNL.pow2() ) ).sqrt() );
// EPSILON guards against a division by zero at grazing angles.
return div( 0.5, gv.add( gl ).max( EPSILON ) );
} ).setLayout( {
name: 'V_GGX_SmithCorrelated',
type: 'float',
inputs: [
{ name: 'alpha', type: 'float' },
{ name: 'dotNL', type: 'float' },
{ name: 'dotNV', type: 'float' }
]
} ); // validated
// Anisotropic variant of the height-correlated Smith visibility term, using
// separate alphas along the tangent (alphaT) and bitangent (alphaB) directions.
// https://google.github.io/filament/Filament.md.html#materialsystem/anisotropicmodel/anisotropicspecularbrdf
const V_GGX_SmithCorrelated_Anisotropic = /*@__PURE__*/ Fn( ( { alphaT, alphaB, dotTV, dotBV, dotTL, dotBL, dotNV, dotNL } ) => {
const gv = dotNL.mul( vec3( alphaT.mul( dotTV ), alphaB.mul( dotBV ), dotNV ).length() );
const gl = dotNV.mul( vec3( alphaT.mul( dotTL ), alphaB.mul( dotBL ), dotNL ).length() );
const v = div( 0.5, gv.add( gl ) );
return v.saturate();
} ).setLayout( {
name: 'V_GGX_SmithCorrelated_Anisotropic',
type: 'float',
inputs: [
{ name: 'alphaT', type: 'float', qualifier: 'in' },
{ name: 'alphaB', type: 'float', qualifier: 'in' },
{ name: 'dotTV', type: 'float', qualifier: 'in' },
{ name: 'dotBV', type: 'float', qualifier: 'in' },
{ name: 'dotTL', type: 'float', qualifier: 'in' },
{ name: 'dotBL', type: 'float', qualifier: 'in' },
{ name: 'dotNV', type: 'float', qualifier: 'in' },
{ name: 'dotNL', type: 'float', qualifier: 'in' }
]
} );
// GGX/Trowbridge-Reitz normal distribution function.
// Microfacet Models for Refraction through Rough Surfaces - equation (33)
// http://graphicrants.blogspot.com/2013/08/specular-brdf-reference.html
// alpha is "roughness squared" in Disney’s reparameterization
const D_GGX = /*@__PURE__*/ Fn( ( { alpha, dotNH } ) => {
const a2 = alpha.pow2();
const denom = dotNH.pow2().mul( a2.oneMinus() ).oneMinus(); // avoid alpha = 0 with dotNH = 1
return a2.div( denom.pow2() ).mul( 1 / Math.PI );
} ).setLayout( {
name: 'D_GGX',
type: 'float',
inputs: [
{ name: 'alpha', type: 'float' },
{ name: 'dotNH', type: 'float' }
]
} ); // validated
// 1 / PI as a shared float node constant.
const RECIPROCAL_PI = /*@__PURE__*/ float( 1 / Math.PI );
// Anisotropic GGX normal distribution function with separate tangent/bitangent alphas.
// https://google.github.io/filament/Filament.md.html#materialsystem/anisotropicmodel/anisotropicspecularbrdf
const D_GGX_Anisotropic = /*@__PURE__*/ Fn( ( { alphaT, alphaB, dotNH, dotTH, dotBH } ) => {
const a2 = alphaT.mul( alphaB );
const v = vec3( alphaB.mul( dotTH ), alphaT.mul( dotBH ), a2.mul( dotNH ) );
const v2 = v.dot( v );
const w2 = a2.div( v2 );
return RECIPROCAL_PI.mul( a2.mul( w2.pow2() ) );
} ).setLayout( {
name: 'D_GGX_Anisotropic',
type: 'float',
inputs: [
{ name: 'alphaT', type: 'float', qualifier: 'in' },
{ name: 'alphaB', type: 'float', qualifier: 'in' },
{ name: 'dotNH', type: 'float', qualifier: 'in' },
{ name: 'dotTH', type: 'float', qualifier: 'in' },
{ name: 'dotBH', type: 'float', qualifier: 'in' }
]
} );
// GGX Distribution, Schlick Fresnel, GGX_SmithCorrelated Visibility
// Full specular microfacet BRDF: F * V * D. Optional inputs:
// - normalView: alternate shading normal (defaults to transformedNormalView),
//   used e.g. for the clearcoat layer.
// - USE_IRIDESCENCE / USE_ANISOTROPY: the `defined()` checks below are plain JS
//   conditionals, so these branches are resolved when the node graph is built,
//   not per fragment.
const BRDF_GGX = /*@__PURE__*/ Fn( ( inputs ) => {
const { lightDirection, f0, f90, roughness, f, USE_IRIDESCENCE, USE_ANISOTROPY } = inputs;
const normalView = inputs.normalView || transformedNormalView;
const alpha = roughness.pow2(); // UE4's roughness
const halfDir = lightDirection.add( positionViewDirection ).normalize();
const dotNL = normalView.dot( lightDirection ).clamp();
const dotNV = normalView.dot( positionViewDirection ).clamp(); // @ TODO: Move to core dotNV
const dotNH = normalView.dot( halfDir ).clamp();
const dotVH = positionViewDirection.dot( halfDir ).clamp();
let F = F_Schlick( { f0, f90, dotVH } );
let V, D;
if ( defined( USE_IRIDESCENCE ) ) {
// Blend the base Fresnel with the precomputed iridescence Fresnel `f`.
F = iridescence.mix( F, f );
}
if ( defined( USE_ANISOTROPY ) ) {
const dotTL = anisotropyT.dot( lightDirection );
const dotTV = anisotropyT.dot( positionViewDirection );
const dotTH = anisotropyT.dot( halfDir );
const dotBL = anisotropyB.dot( lightDirection );
const dotBV = anisotropyB.dot( positionViewDirection );
const dotBH = anisotropyB.dot( halfDir );
V = V_GGX_SmithCorrelated_Anisotropic( { alphaT, alphaB: alpha, dotTV, dotBV, dotTL, dotBL, dotNV, dotNL } );
D = D_GGX_Anisotropic( { alphaT, alphaB: alpha, dotNH, dotTH, dotBH } );
} else {
V = V_GGX_SmithCorrelated( { alpha, dotNL, dotNV } );
D = D_GGX( { alpha, dotNH } );
}
return F.mul( V ).mul( D );
} ); // validated
// Analytical approximation of the DFG LUT, one half of the
// split-sum approximation used in indirect specular lighting.
// via 'environmentBRDF' from "Physically Based Shading on Mobile"
// https://www.unrealengine.com/blog/physically-based-shading-on-mobile
// Returns vec2( scale, bias ) to be applied to the specular color / F90 terms.
const DFGApprox = /*@__PURE__*/ Fn( ( { roughness, dotNV } ) => {
const c0 = vec4( - 1, - 0.0275, - 0.572, 0.022 );
const c1 = vec4( 1, 0.0425, 1.04, - 0.04 );
const r = roughness.mul( c0 ).add( c1 );
const a004 = r.x.mul( r.x ).min( dotNV.mul( - 9.28 ).exp2() ).mul( r.x ).add( r.y );
const fab = vec2( - 1.04, 1.04 ).mul( a004 ).add( r.zw );
return fab;
} ).setLayout( {
name: 'DFGApprox',
type: 'vec2',
inputs: [
{ name: 'roughness', type: 'float' },
// dotNV is a scalar dot product at every call site (n.dot(v).clamp());
// it was previously mistyped as 'vec3' in the layout.
{ name: 'dotNV', type: 'float' }
]
} );
// Environment BRDF: combines the DFG scale/bias terms with the material's
// specular color and F90 for indirect specular lighting.
const EnvironmentBRDF = /*@__PURE__*/ Fn( ( { dotNV, specularColor, specularF90, roughness } ) => {
const fab = DFGApprox( { dotNV, roughness } );
const scaled = specularColor.mul( fab.x );
const biased = specularF90.mul( fab.y );
return scaled.add( biased );
} );
// Inverts the Schlick Fresnel approximation: recovers f0 from a Fresnel value `f`
// observed at angle dotVH. x5 is clamped below 1 to keep the final division finite.
const Schlick_to_F0 = /*@__PURE__*/ Fn( ( { f, f90, dotVH } ) => {
const x = dotVH.oneMinus().saturate();
const x2 = x.mul( x );
const x5 = x.mul( x2, x2 ).clamp( 0, .9999 ); // x^5
return f.sub( vec3( f90 ).mul( x5 ) ).div( x5.oneMinus() );
} ).setLayout( {
name: 'Schlick_to_F0',
type: 'vec3',
inputs: [
{ name: 'f', type: 'vec3' },
{ name: 'f90', type: 'float' },
{ name: 'dotVH', type: 'float' }
]
} );
// Charlie sheen normal distribution function.
// https://github.com/google/filament/blob/master/shaders/src/brdf.fs
const D_Charlie = /*@__PURE__*/ Fn( ( { roughness, dotNH } ) => {
const alpha = roughness.pow2();
// Estevez and Kulla 2017, "Production Friendly Microfacet Sheen BRDF"
const invAlpha = float( 1.0 ).div( alpha );
const cos2h = dotNH.pow2();
const sin2h = cos2h.oneMinus().max( 0.0078125 ); // 2^(-14/2), so sin2h^2 > 0 in fp16
return float( 2.0 ).add( invAlpha ).mul( sin2h.pow( invAlpha.mul( 0.5 ) ) ).div( 2.0 * Math.PI );
} ).setLayout( {
name: 'D_Charlie',
type: 'float',
inputs: [
{ name: 'roughness', type: 'float' },
{ name: 'dotNH', type: 'float' }
]
} );
// Visibility term used with the Charlie sheen distribution.
// https://github.com/google/filament/blob/master/shaders/src/brdf.fs
const V_Neubelt = /*@__PURE__*/ Fn( ( { dotNV, dotNL } ) => {
// Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886"
return float( 1.0 ).div( float( 4.0 ).mul( dotNL.add( dotNV ).sub( dotNL.mul( dotNV ) ) ) );
} ).setLayout( {
name: 'V_Neubelt',
type: 'float',
inputs: [
{ name: 'dotNV', type: 'float' },
{ name: 'dotNL', type: 'float' }
]
} );
// Sheen specular BRDF: sheen color * D_Charlie * V_Neubelt, evaluated with the
// material-level `sheen` and `sheenRoughness` nodes.
const BRDF_Sheen = /*@__PURE__*/ Fn( ( { lightDirection } ) => {
const halfDir = lightDirection.add( positionViewDirection ).normalize();
const dotNL = transformedNormalView.dot( lightDirection ).clamp();
const dotNV = transformedNormalView.dot( positionViewDirection ).clamp();
const dotNH = transformedNormalView.dot( halfDir ).clamp();
const D = D_Charlie( { roughness: sheenRoughness, dotNH } );
const V = V_Neubelt( { dotNV, dotNL } );
return sheen.mul( D ).mul( V );
} );
// Rect Area Light
// Real-Time Polygonal-Light Shading with Linearly Transformed Cosines
// by Eric Heitz, Jonathan Dupuy, Stephen Hill and David Neubelt
// code: https://github.com/selfshadow/ltc_code/
// Computes the lookup coordinates into the 64x64 LTC textures for the given
// normal, view direction and roughness.
const LTC_Uv = /*@__PURE__*/ Fn( ( { N, V, roughness } ) => {
const LUT_SIZE = 64.0;
const LUT_SCALE = ( LUT_SIZE - 1.0 ) / LUT_SIZE;
const LUT_BIAS = 0.5 / LUT_SIZE;
const dotNV = N.dot( V ).saturate();
// texture parameterized by sqrt( GGX alpha ) and sqrt( 1 - cos( theta ) )
const uv = vec2( roughness, dotNV.oneMinus().sqrt() );
// Remap to texel centers.
uv.assign( uv.mul( LUT_SCALE ).add( LUT_BIAS ) );
return uv;
} ).setLayout( {
name: 'LTC_Uv',
type: 'vec2',
inputs: [
{ name: 'N', type: 'vec3' },
{ name: 'V', type: 'vec3' },
{ name: 'roughness', type: 'float' }
]
} );
// Scalar form factor of the (horizon-clipped) light polygon given its vector form factor.
const LTC_ClippedSphereFormFactor = /*@__PURE__*/ Fn( ( { f } ) => {
// Real-Time Area Lighting: a Journey from Research to Production (p.102)
// An approximation of the form factor of a horizon-clipped rectangle.
const l = f.length();
return max$1( l.mul( l ).add( f.z ).div( l.add( 1.0 ) ), 0 );
} ).setLayout( {
name: 'LTC_ClippedSphereFormFactor',
type: 'float',
inputs: [
{ name: 'f', type: 'vec3' }
]
} );
// Vector form factor contribution of one polygon edge (v1 -> v2) on the unit sphere.
const LTC_EdgeVectorFormFactor = /*@__PURE__*/ Fn( ( { v1, v2 } ) => {
const x = v1.dot( v2 );
const y = x.abs().toVar();
// rational polynomial approximation to theta / sin( theta ) / 2PI
const a = y.mul( 0.0145206 ).add( 0.4965155 ).mul( y ).add( 0.8543985 ).toVar();
const b = y.add( 4.1616724 ).mul( y ).add( 3.4175940 ).toVar();
const v = a.div( b );
// For x <= 0 fall back to the exact-ish expression; 1e-7 guards the inverseSqrt.
const theta_sintheta = x.greaterThan( 0.0 ).select( v, max$1( x.mul( x ).oneMinus(), 1e-7 ).inverseSqrt().mul( 0.5 ).sub( v ) );
return v1.cross( v2 ).mul( theta_sintheta );
} ).setLayout( {
name: 'LTC_EdgeVectorFormFactor',
type: 'vec3',
inputs: [
{ name: 'v1', type: 'vec3' },
{ name: 'v2', type: 'vec3' }
]
} );
// Evaluates the LTC integral over the rectangle (p0..p3) for shading point P,
// normal N and view direction V, using the inverse LTC matrix mInv.
// Returns vec3( 0 ) when P is behind the light's plane.
const LTC_Evaluate = /*@__PURE__*/ Fn( ( { N, V, P, mInv, p0, p1, p2, p3 } ) => {
// bail if point is on back side of plane of light
// assumes ccw winding order of light vertices
const v1 = p1.sub( p0 ).toVar();
const v2 = p3.sub( p0 ).toVar();
const lightNormal = v1.cross( v2 );
const result = vec3().toVar();
If( lightNormal.dot( P.sub( p0 ) ).greaterThanEqual( 0.0 ), () => {
// construct orthonormal basis around N
const T1 = V.sub( N.mul( V.dot( N ) ) ).normalize();
const T2 = N.cross( T1 ).negate(); // negated from paper; possibly due to a different handedness of world coordinate system
// compute transform
const mat = mInv.mul( mat3( T1, T2, N ).transpose() ).toVar();
// transform rect
// & project rect onto sphere
const coords0 = mat.mul( p0.sub( P ) ).normalize().toVar();
const coords1 = mat.mul( p1.sub( P ) ).normalize().toVar();
const coords2 = mat.mul( p2.sub( P ) ).normalize().toVar();
const coords3 = mat.mul( p3.sub( P ) ).normalize().toVar();
// calculate vector form factor
const vectorFormFactor = vec3( 0 ).toVar();
vectorFormFactor.addAssign( LTC_EdgeVectorFormFactor( { v1: coords0, v2: coords1 } ) );
vectorFormFactor.addAssign( LTC_EdgeVectorFormFactor( { v1: coords1, v2: coords2 } ) );
vectorFormFactor.addAssign( LTC_EdgeVectorFormFactor( { v1: coords2, v2: coords3 } ) );
vectorFormFactor.addAssign( LTC_EdgeVectorFormFactor( { v1: coords3, v2: coords0 } ) );
// adjust for horizon clipping
result.assign( vec3( LTC_ClippedSphereFormFactor( { f: vectorFormFactor } ) ) );
} );
return result;
} ).setLayout( {
name: 'LTC_Evaluate',
type: 'vec3',
inputs: [
{ name: 'N', type: 'vec3' },
{ name: 'V', type: 'vec3' },
{ name: 'P', type: 'vec3' },
{ name: 'mInv', type: 'mat3' },
{ name: 'p0', type: 'vec3' },
{ name: 'p1', type: 'vec3' },
{ name: 'p2', type: 'vec3' },
{ name: 'p3', type: 'vec3' }
]
} );
/** @module TextureBicubic **/
// Mipped Bicubic Texture Filtering by N8
// https://www.shadertoy.com/view/Dl2SDW
// Cubic B-spline weight functions (w0..w3) and the derived gather weights (g0, g1)
// and texel offsets (h0, h1) used by `bicubic()` below.
const bC = 1.0 / 6.0;
const w0 = ( a ) => mul( bC, mul( a, mul( a, a.negate().add( 3.0 ) ).sub( 3.0 ) ).add( 1.0 ) );
const w1 = ( a ) => mul( bC, mul( a, mul( a, mul( 3.0, a ).sub( 6.0 ) ) ).add( 4.0 ) );
const w2 = ( a ) => mul( bC, mul( a, mul( a, mul( - 3.0, a ).add( 3.0 ) ).add( 3.0 ) ).add( 1.0 ) );
const w3 = ( a ) => mul( bC, pow( a, 3 ) );
const g0 = ( a ) => w0( a ).add( w1( a ) );
const g1 = ( a ) => w2( a ).add( w3( a ) );
// h0 and h1 are the two offset functions
const h0 = ( a ) => add( - 1.0, w1( a ).div( w0( a ).add( w1( a ) ) ) );
const h1 = ( a ) => add( 1.0, w3( a ).div( w2( a ).add( w3( a ) ) ) );
// Bicubic filtering at a single LOD using four bilinear taps.
// texelSize packs xy = 1 / size and zw = size (see `textureBicubic` below).
const bicubic = ( textureNode, texelSize, lod ) => {
const uv = textureNode.uvNode;
const uvScaled = mul( uv, texelSize.zw ).add( 0.5 );
const iuv = floor( uvScaled );
const fuv = fract( uvScaled );
const g0x = g0( fuv.x );
const g1x = g1( fuv.x );
const h0x = h0( fuv.x );
const h1x = h1( fuv.x );
const h0y = h0( fuv.y );
const h1y = h1( fuv.y );
// Four sample positions offset so that hardware bilinear filtering does the inner blend.
const p0 = vec2( iuv.x.add( h0x ), iuv.y.add( h0y ) ).sub( 0.5 ).mul( texelSize.xy );
const p1 = vec2( iuv.x.add( h1x ), iuv.y.add( h0y ) ).sub( 0.5 ).mul( texelSize.xy );
const p2 = vec2( iuv.x.add( h0x ), iuv.y.add( h1y ) ).sub( 0.5 ).mul( texelSize.xy );
const p3 = vec2( iuv.x.add( h1x ), iuv.y.add( h1y ) ).sub( 0.5 ).mul( texelSize.xy );
const a = g0( fuv.y ).mul( add( g0x.mul( textureNode.sample( p0 ).level( lod ) ), g1x.mul( textureNode.sample( p1 ).level( lod ) ) ) );
const b = g1( fuv.y ).mul( add( g0x.mul( textureNode.sample( p2 ).level( lod ) ), g1x.mul( textureNode.sample( p3 ).level( lod ) ) ) );
return a.add( b );
};
/**
* Applies mipped bicubic texture filtering to the given texture node.
*
* Filters the floor and ceil mip levels of `lodNode` separately and blends them
* with the fractional part of the LOD.
*
* @method
* @param {TextureNode} textureNode - The texture node that should be filtered.
* @param {Node<float>} [lodNode=float(3)] - Defines the LOD to sample from.
* @return {Node} The filtered texture sample.
*/
const textureBicubic = /*@__PURE__*/ Fn( ( [ textureNode, lodNode = float( 3 ) ] ) => {
const fLodSize = vec2( textureNode.size( int( lodNode ) ) );
const cLodSize = vec2( textureNode.size( int( lodNode.add( 1.0 ) ) ) );
const fLodSizeInv = div( 1.0, fLodSize );
const cLodSizeInv = div( 1.0, cLodSize );
const fSample = bicubic( textureNode, vec4( fLodSizeInv, fLodSize ), floor( lodNode ) );
const cSample = bicubic( textureNode, vec4( cLodSizeInv, cLodSize ), ceil( lodNode ) );
return fract( lodNode ).mix( fSample, cSample );
} );
//
// Transmission
//
// Computes the refraction ray through the volume, scaled by the material thickness
// (given in local space) and the model's scale.
const getVolumeTransmissionRay = /*@__PURE__*/ Fn( ( [ n, v, thickness, ior, modelMatrix ] ) => {
// Direction of refracted light.
const refractionVector = vec3( refract( v.negate(), normalize( n ), div( 1.0, ior ) ) );
// Compute rotation-independent scaling of the model matrix.
const modelScale = vec3(
length( modelMatrix[ 0 ].xyz ),
length( modelMatrix[ 1 ].xyz ),
length( modelMatrix[ 2 ].xyz )
);
// The thickness is specified in local space.
return normalize( refractionVector ).mul( thickness.mul( modelScale ) );
} ).setLayout( {
name: 'getVolumeTransmissionRay',
type: 'vec3',
inputs: [
{ name: 'n', type: 'vec3' },
{ name: 'v', type: 'vec3' },
{ name: 'thickness', type: 'float' },
{ name: 'ior', type: 'float' },
{ name: 'modelMatrix', type: 'mat4' }
]
} );
// Scales roughness by a factor derived from the IOR, clamped to [0, 1].
const applyIorToRoughness = /*@__PURE__*/ Fn( ( [ roughness, ior ] ) => {
// Scale roughness with IOR so that an IOR of 1.0 results in no microfacet refraction and
// an IOR of 1.5 results in the default amount of microfacet refraction.
return roughness.mul( clamp( ior.mul( 2.0 ).sub( 2.0 ), 0.0, 1.0 ) );
} ).setLayout( {
name: 'applyIorToRoughness',
type: 'float',
inputs: [
{ name: 'roughness', type: 'float' },
{ name: 'ior', type: 'float' }
]
} );
// Per-side viewport captures used for transmission: one texture per rendered material side.
const viewportBackSideTexture = /*@__PURE__*/ viewportMipTexture();
const viewportFrontSideTexture = /*@__PURE__*/ viewportMipTexture();
// Samples the viewport capture matching the material's side and filters it bicubically
// with a LOD derived from roughness, IOR and the screen width.
const getTransmissionSample = /*@__PURE__*/ Fn( ( [ fragCoord, roughness, ior ], { material } ) => {
const vTexture = material.side === BackSide ? viewportBackSideTexture : viewportFrontSideTexture;
const transmissionSample = vTexture.sample( fragCoord );
//const transmissionSample = viewportMipTexture( fragCoord );
const lod = log2( screenSize.x ).mul( applyIorToRoughness( roughness, ior ) );
return textureBicubic( transmissionSample, lod );
} );
// Beer's-law attenuation of light travelling `transmissionDistance` through the volume.
// The early `return` inside `If()` is a TSL conditional return; the trailing
// `return vec3( 1.0 )` is the fall-through for an infinite attenuation distance.
const volumeAttenuation = /*@__PURE__*/ Fn( ( [ transmissionDistance, attenuationColor, attenuationDistance ] ) => {
If( attenuationDistance.notEqual( 0 ), () => {
// Compute light attenuation using Beer's law.
const attenuationCoefficient = log( attenuationColor ).negate().div( attenuationDistance );
const transmittance = exp( attenuationCoefficient.negate().mul( transmissionDistance ) );
return transmittance;
} );
// Attenuation distance is +∞, i.e. the transmitted color is not attenuated at all.
return vec3( 1.0 );
} ).setLayout( {
name: 'volumeAttenuation',
type: 'vec3',
inputs: [
{ name: 'transmissionDistance', type: 'float' },
{ name: 'attenuationColor', type: 'vec3' },
{ name: 'attenuationDistance', type: 'float' }
]
} );
// Computes the transmitted (refracted) light for transmissive materials.
// `dispersion` is a JS-level flag: when truthy, each RGB channel is refracted with
// a slightly different IOR (three framebuffer samples); otherwise a single sample
// is taken. Returns vec4( rgb, alpha ).
const getIBLVolumeRefraction = /*@__PURE__*/ Fn( ( [ n, v, roughness, diffuseColor, specularColor, specularF90, position, modelMatrix, viewMatrix, projMatrix, ior, thickness, attenuationColor, attenuationDistance, dispersion ] ) => {
let transmittedLight, transmittance;
if ( dispersion ) {
transmittedLight = vec4().toVar();
transmittance = vec3().toVar();
// Spread the IOR around its base value; 0.025 scales the dispersion strength.
const halfSpread = ior.sub( 1.0 ).mul( dispersion.mul( 0.025 ) );
const iors = vec3( ior.sub( halfSpread ), ior, ior.add( halfSpread ) );
Loop( { start: 0, end: 3 }, ( { i } ) => {
const ior = iors.element( i );
const transmissionRay = getVolumeTransmissionRay( n, v, thickness, ior, modelMatrix );
const refractedRayExit = position.add( transmissionRay );
// Project refracted vector on the framebuffer, while mapping to normalized device coordinates.
const ndcPos = projMatrix.mul( viewMatrix.mul( vec4( refractedRayExit, 1.0 ) ) );
const refractionCoords = vec2( ndcPos.xy.div( ndcPos.w ) ).toVar();
refractionCoords.addAssign( 1.0 );
refractionCoords.divAssign( 2.0 );
refractionCoords.assign( vec2( refractionCoords.x, refractionCoords.y.oneMinus() ) ); // webgpu
// Sample framebuffer to get pixel the refracted ray hits.
const transmissionSample = getTransmissionSample( refractionCoords, roughness, ior );
// Keep only channel i of this sample; alpha is averaged over the three iterations.
transmittedLight.element( i ).assign( transmissionSample.element( i ) );
transmittedLight.a.addAssign( transmissionSample.a );
transmittance.element( i ).assign( diffuseColor.element( i ).mul( volumeAttenuation( length( transmissionRay ), attenuationColor, attenuationDistance ).element( i ) ) );
} );
transmittedLight.a.divAssign( 3.0 );
} else {
const transmissionRay = getVolumeTransmissionRay( n, v, thickness, ior, modelMatrix );
const refractedRayExit = position.add( transmissionRay );
// Project refracted vector on the framebuffer, while mapping to normalized device coordinates.
const ndcPos = projMatrix.mul( viewMatrix.mul( vec4( refractedRayExit, 1.0 ) ) );
const refractionCoords = vec2( ndcPos.xy.div( ndcPos.w ) ).toVar();
refractionCoords.addAssign( 1.0 );
refractionCoords.divAssign( 2.0 );
refractionCoords.assign( vec2( refractionCoords.x, refractionCoords.y.oneMinus() ) ); // webgpu
// Sample framebuffer to get pixel the refracted ray hits.
transmittedLight = getTransmissionSample( refractionCoords, roughness, ior );
transmittance = diffuseColor.mul( volumeAttenuation( length( transmissionRay ), attenuationColor, attenuationDistance ) );
}
const attenuatedColor = transmittance.rgb.mul( transmittedLight.rgb );
const dotNV = n.dot( v ).clamp();
// Get the specular component.
const F = vec3( EnvironmentBRDF( { // n, v, specularColor, specularF90, roughness
dotNV,
specularColor,
specularF90,
roughness
} ) );
// As less light is transmitted, the opacity should be increased. This simple approximation does a decent job
// of modulating a CSS background, and has no effect when the buffer is opaque, due to a solid object or clear color.
const transmittanceFactor = transmittance.r.add( transmittance.g, transmittance.b ).div( 3.0 );
return vec4( F.oneMinus().mul( attenuatedColor ), transmittedLight.a.oneMinus().mul( transmittanceFactor ).oneMinus() );
} );
//
// Iridescence
//
// XYZ to linear-sRGB color space
const XYZ_TO_REC709 = /*@__PURE__*/ mat3(
3.2404542, - 0.9692660, 0.0556434,
- 1.5371385, 1.8760108, - 0.2040259,
- 0.4985314, 0.0415560, 1.0572252
);
// Assume air interface for top
// Note: We don't handle the case fresnel0 == 1
const Fresnel0ToIor = ( fresnel0 ) => {
const sqrtF0 = fresnel0.sqrt();
return vec3( 1.0 ).add( sqrtF0 ).div( vec3( 1.0 ).sub( sqrtF0 ) );
};
// ior is a value between 1.0 and 3.0. 1.0 is air interface
// Normal-incidence Fresnel reflectance for a dielectric/dielectric interface.
const IorToFresnel0 = ( transmittedIor, incidentIor ) => {
return transmittedIor.sub( incidentIor ).div( transmittedIor.add( incidentIor ) ).pow2();
};
// Fresnel equations for dielectric/dielectric interfaces.
// Ref: https://belcour.github.io/blog/research/2017/05/01/brdf-thin-film.html
// Evaluation XYZ sensitivity curves in Fourier space
// OPD: optical path difference; returns a linear-sRGB color via XYZ_TO_REC709.
const evalSensitivity = ( OPD, shift ) => {
const phase = OPD.mul( 2.0 * Math.PI * 1.0e-9 );
const val = vec3( 5.4856e-13, 4.4201e-13, 5.2481e-13 );
const pos = vec3( 1.6810e+06, 1.7953e+06, 2.2084e+06 );
const VAR = vec3( 4.3278e+09, 9.3046e+09, 6.6121e+09 );
const x = float( 9.7470e-14 * Math.sqrt( 2.0 * Math.PI * 4.5282e+09 ) ).mul( phase.mul( 2.2399e+06 ).add( shift.x ).cos() ).mul( phase.pow2().mul( - 4.5282e+09 ).exp() );
let xyz = val.mul( VAR.mul( 2.0 * Math.PI ).sqrt() ).mul( pos.mul( phase ).add( shift ).cos() ).mul( phase.pow2().negate().mul( VAR ).exp() );
xyz = vec3( xyz.x.add( x ), xyz.y, xyz.z ).div( 1.0685e-7 );
const rgb = XYZ_TO_REC709.mul( xyz );
return rgb;
};
// Thin-film iridescence Fresnel term (Belcour/Barla model). Returns the
// wavelength-dependent reflectance, clamped to non-negative values.
const evalIridescence = /*@__PURE__*/ Fn( ( { outsideIOR, eta2, cosTheta1, thinFilmThickness, baseF0 } ) => {
// Force iridescenceIOR -> outsideIOR when thinFilmThickness -> 0.0
const iridescenceIOR = mix( outsideIOR, eta2, smoothstep( 0.0, 0.03, thinFilmThickness ) );
// Evaluate the cosTheta on the base layer (Snell law)
const sinTheta2Sq = outsideIOR.div( iridescenceIOR ).pow2().mul( cosTheta1.pow2().oneMinus() );
// Handle TIR:
const cosTheta2Sq = sinTheta2Sq.oneMinus();
If( cosTheta2Sq.lessThan( 0 ), () => {
// Total internal reflection: everything is reflected.
return vec3( 1.0 );
} );
const cosTheta2 = cosTheta2Sq.sqrt();
// First interface
const R0 = IorToFresnel0( iridescenceIOR, outsideIOR );
const R12 = F_Schlick( { f0: R0, f90: 1.0, dotVH: cosTheta1 } );
//const R21 = R12;
const T121 = R12.oneMinus();
const phi12 = iridescenceIOR.lessThan( outsideIOR ).select( Math.PI, 0.0 );
const phi21 = float( Math.PI ).sub( phi12 );
// Second interface
const baseIOR = Fresnel0ToIor( baseF0.clamp( 0.0, 0.9999 ) ); // guard against 1.0
const R1 = IorToFresnel0( baseIOR, iridescenceIOR.toVec3() );
const R23 = F_Schlick( { f0: R1, f90: 1.0, dotVH: cosTheta2 } );
const phi23 = vec3(
baseIOR.x.lessThan( iridescenceIOR ).select( Math.PI, 0.0 ),
baseIOR.y.lessThan( iridescenceIOR ).select( Math.PI, 0.0 ),
baseIOR.z.lessThan( iridescenceIOR ).select( Math.PI, 0.0 )
);
// Phase shift
const OPD = iridescenceIOR.mul( thinFilmThickness, cosTheta2, 2.0 );
const phi = vec3( phi21 ).add( phi23 );
// Compound terms
const R123 = R12.mul( R23 ).clamp( 1e-5, 0.9999 );
const r123 = R123.sqrt();
const Rs = T121.pow2().mul( R23 ).div( vec3( 1.0 ).sub( R123 ) );
// Reflectance term for m = 0 (DC term amplitude)
const C0 = R12.add( Rs );
const I = C0.toVar();
// Reflectance term for m > 0 (pairs of diracs)
const Cm = Rs.sub( T121 ).toVar();
Loop( { start: 1, end: 2, condition: '<=', name: 'm' }, ( { m } ) => {
Cm.mulAssign( r123 );
const Sm = evalSensitivity( float( m ).mul( OPD ), float( m ).mul( phi ) ).mul( 2.0 );
I.addAssign( Cm.mul( Sm ) );
} );
// Since out of gamut colors might be produced, negative color values are clamped to 0.
return I.max( vec3( 0.0 ) );
} ).setLayout( {
name: 'evalIridescence',
type: 'vec3',
inputs: [
{ name: 'outsideIOR', type: 'float' },
{ name: 'eta2', type: 'float' },
{ name: 'cosTheta1', type: 'float' },
{ name: 'thinFilmThickness', type: 'float' },
{ name: 'baseF0', type: 'vec3' }
]
} );
//
// Sheen
//
// This is a curve-fit approximation to the "Charlie sheen" BRDF integrated over the hemisphere from
// Estevez and Kulla 2017, "Production Friendly Microfacet Sheen BRDF". The analysis can be found
// in the Sheen section of https://drive.google.com/file/d/1T0D1VSyR4AllqIJTQAraEIzjlb5h4FKH/view?usp=sharing
// The a/b/DG fits use different coefficients below and above roughness 0.25.
const IBLSheenBRDF = /*@__PURE__*/ Fn( ( { normal, viewDir, roughness } ) => {
const dotNV = normal.dot( viewDir ).saturate();
const r2 = roughness.pow2();
const a = select(
roughness.lessThan( 0.25 ),
float( - 339.2 ).mul( r2 ).add( float( 161.4 ).mul( roughness ) ).sub( 25.9 ),
float( - 8.48 ).mul( r2 ).add( float( 14.3 ).mul( roughness ) ).sub( 9.95 )
);
const b = select(
roughness.lessThan( 0.25 ),
float( 44.0 ).mul( r2 ).sub( float( 23.7 ).mul( roughness ) ).add( 3.26 ),
float( 1.97 ).mul( r2 ).sub( float( 3.27 ).mul( roughness ) ).add( 0.72 )
);
const DG = select( roughness.lessThan( 0.25 ), 0.0, float( 0.1 ).mul( roughness ).sub( 0.025 ) ).add( a.mul( dotNV ).add( b ).exp() );
return DG.mul( 1.0 / Math.PI ).saturate();
} );
// Fixed Fresnel constants used for the clearcoat layer.
const clearcoatF0 = vec3( 0.04 );
const clearcoatF90 = float( 1 );
/**
* Represents the lighting model for a PBR material.
*
* @augments LightingModel
*/
class PhysicalLightingModel extends LightingModel {
/**
* Constructs a new physical lighting model.
*
* @param {Boolean} [clearcoat=false] - Whether clearcoat is supported or not.
* @param {Boolean} [sheen=false] - Whether sheen is supported or not.
* @param {Boolean} [iridescence=false] - Whether iridescence is supported or not.
* @param {Boolean} [anisotropy=false] - Whether anisotropy is supported or not.
* @param {Boolean} [transmission=false] - Whether transmission is supported or not.
* @param {Boolean} [dispersion=false] - Whether dispersion is supported or not.
*/
constructor( clearcoat = false, sheen = false, iridescence = false, anisotropy = false, transmission = false, dispersion = false ) {
super();
/**
* Whether clearcoat is supported or not.
*
* @type {Boolean}
* @default false
*/
this.clearcoat = clearcoat;
/**
* Whether sheen is supported or not.
*
* @type {Boolean}
* @default false
*/
this.sheen = sheen;
/**
* Whether iridescence is supported or not.
*
* @type {Boolean}
* @default false
*/
this.iridescence = iridescence;
/**
* Whether anisotropy is supported or not.
*
* @type {Boolean}
* @default false
*/
this.anisotropy = anisotropy;
/**
* Whether transmission is supported or not.
*
* @type {Boolean}
* @default false
*/
this.transmission = transmission;
/**
* Whether dispersion is supported or not.
*
* @type {Boolean}
* @default false
*/
this.dispersion = dispersion;
/**
* The clear coat radiance. Allocated in `start()` when clearcoat is enabled.
*
* @type {Node?}
* @default null
*/
this.clearcoatRadiance = null;
/**
* The clear coat specular direct. Allocated in `start()` when clearcoat is enabled.
*
* @type {Node?}
* @default null
*/
this.clearcoatSpecularDirect = null;
/**
* The clear coat specular indirect. Allocated in `start()` when clearcoat is enabled.
*
* @type {Node?}
* @default null
*/
this.clearcoatSpecularIndirect = null;
/**
* The sheen specular direct. Allocated in `start()` when sheen is enabled.
*
* @type {Node?}
* @default null
*/
this.sheenSpecularDirect = null;
/**
* The sheen specular indirect. Allocated in `start()` when sheen is enabled.
*
* @type {Node?}
* @default null
*/
this.sheenSpecularIndirect = null;
/**
* The iridescence Fresnel. Computed in `start()` when iridescence is enabled.
*
* @type {Node?}
* @default null
*/
this.iridescenceFresnel = null;
/**
* The iridescence F0. Computed in `start()` when iridescence is enabled.
*
* @type {Node?}
* @default null
*/
this.iridescenceF0 = null;
}
/**
* Depending on what features are requested, the method prepares certain node variables
* which are later used for lighting computations.
*
* Allocates the clearcoat/sheen accumulators, precomputes the iridescence
* Fresnel/F0, and sets up the transmission backdrop on the context.
*
* @param {ContextNode} context - The current node context.
*/
start( context ) {
if ( this.clearcoat === true ) {
this.clearcoatRadiance = vec3().toVar( 'clearcoatRadiance' );
this.clearcoatSpecularDirect = vec3().toVar( 'clearcoatSpecularDirect' );
this.clearcoatSpecularIndirect = vec3().toVar( 'clearcoatSpecularIndirect' );
}
if ( this.sheen === true ) {
this.sheenSpecularDirect = vec3().toVar( 'sheenSpecularDirect' );
this.sheenSpecularIndirect = vec3().toVar( 'sheenSpecularIndirect' );
}
if ( this.iridescence === true ) {
const dotNVi = transformedNormalView.dot( positionViewDirection ).clamp();
this.iridescenceFresnel = evalIridescence( {
outsideIOR: float( 1.0 ), // air on the outside
eta2: iridescenceIOR,
cosTheta1: dotNVi,
thinFilmThickness: iridescenceThickness,
baseF0: specularColor
} );
this.iridescenceF0 = Schlick_to_F0( { f: this.iridescenceFresnel, f90: 1.0, dotVH: dotNVi } );
}
if ( this.transmission === true ) {
const position = positionWorld;
const v = cameraPosition.sub( positionWorld ).normalize(); // TODO: Create Node for this, same issue in MaterialX
const n = transformedNormalWorld;
context.backdrop = getIBLVolumeRefraction(
n,
v,
roughness,
diffuseColor,
specularColor,
specularF90, // specularF90
position, // positionWorld
modelWorldMatrix, // modelMatrix
cameraViewMatrix, // viewMatrix
cameraProjectionMatrix, // projMatrix
ior,
thickness,
attenuationColor,
attenuationDistance,
this.dispersion ? dispersion : null
);
context.backdropAlpha = transmission;
// Fade the diffuse alpha towards the backdrop alpha with the transmission amount.
diffuseColor.a.mulAssign( mix( 1, context.backdrop.a, transmission ) );
}
}
// Fdez-Agüera's "Multiple-Scattering Microfacet Model for Real-Time Image Based Lighting"
// Approximates multi-scattering in order to preserve energy.
// http://www.jcgt.org/published/0008/01/03/
/**
* Accumulates the single- and multi-scattering specular contributions into the
* given node variables, honoring the iridescence F0 when present.
*
* @param {Node<vec3>} singleScatter - Accumulator for the single-scattering term.
* @param {Node<vec3>} multiScatter - Accumulator for the multi-scattering term.
* @param {Node<float>} specularF90 - The specular F90 value.
*/
computeMultiscattering( singleScatter, multiScatter, specularF90 ) {
const dotNV = transformedNormalView.dot( positionViewDirection ).clamp(); // @ TODO: Move to core dotNV
const fab = DFGApprox( { roughness, dotNV } );
const Fr = this.iridescenceF0 ? iridescence.mix( specularColor, this.iridescenceF0 ) : specularColor;
const FssEss = Fr.mul( fab.x ).add( specularF90.mul( fab.y ) );
const Ess = fab.x.add( fab.y );
const Ems = Ess.oneMinus();
const Favg = specularColor.add( specularColor.oneMinus().mul( 0.047619 ) ); // 1/21
const Fms = FssEss.mul( Favg ).div( Ems.mul( Favg ).oneMinus() );
singleScatter.addAssign( FssEss );
multiScatter.addAssign( Fms.mul( Ems ) );
}
/**
* Implements the direct light.
*
* Accumulates sheen and clearcoat specular (when enabled) plus the diffuse and
* specular terms into the reflected light.
*
* @param {Object} input - The input data.
* @param {Node<vec3>} input.lightDirection - The light direction.
* @param {Node<vec3>} input.lightColor - The light color.
* @param {Object} input.reflectedLight - The reflected light accumulators.
*/
direct( { lightDirection, lightColor, reflectedLight } ) {
const dotNL = transformedNormalView.dot( lightDirection ).clamp();
const irradiance = dotNL.mul( lightColor );
if ( this.sheen === true ) {
this.sheenSpecularDirect.addAssign( irradiance.mul( BRDF_Sheen( { lightDirection } ) ) );
}
if ( this.clearcoat === true ) {
// Clearcoat uses its own normal, roughness and fixed F0/F90.
const dotNLcc = transformedClearcoatNormalView.dot( lightDirection ).clamp();
const ccIrradiance = dotNLcc.mul( lightColor );
this.clearcoatSpecularDirect.addAssign( ccIrradiance.mul( BRDF_GGX( { lightDirection, f0: clearcoatF0, f90: clearcoatF90, roughness: clearcoatRoughness, normalView: transformedClearcoatNormalView } ) ) );
}
reflectedLight.directDiffuse.addAssign( irradiance.mul( BRDF_Lambert( { diffuseColor: diffuseColor.rgb } ) ) );
reflectedLight.directSpecular.addAssign( irradiance.mul( BRDF_GGX( { lightDirection, f0: specularColor, f90: 1, roughness, iridescence: this.iridescence, f: this.iridescenceFresnel, USE_IRIDESCENCE: this.iridescence, USE_ANISOTROPY: this.anisotropy } ) ) );
}
/**
* This method is intended for implementing the direct light term for
* rect area light nodes.
*
* Evaluates the LTC area-light integral for the specular term (with the matrix
* sampled from ltc_1) and for the diffuse term (with the identity matrix).
*
* @param {Object} input - The input data (light color/position, half extents,
* reflected light accumulators and the two LTC lookup textures).
*/
directRectArea( { lightColor, lightPosition, halfWidth, halfHeight, reflectedLight, ltc_1, ltc_2 } ) {
const p0 = lightPosition.add( halfWidth ).sub( halfHeight ); // counterclockwise; light shines in local neg z direction
const p1 = lightPosition.sub( halfWidth ).sub( halfHeight );
const p2 = lightPosition.sub( halfWidth ).add( halfHeight );
const p3 = lightPosition.add( halfWidth ).add( halfHeight );
const N = transformedNormalView;
const V = positionViewDirection;
const P = positionView.toVar();
const uv = LTC_Uv( { N, V, roughness } );
const t1 = ltc_1.sample( uv ).toVar();
const t2 = ltc_2.sample( uv ).toVar();
const mInv = mat3(
vec3( t1.x, 0, t1.y ),
vec3( 0, 1, 0 ),
vec3( t1.z, 0, t1.w )
).toVar();
// LTC Fresnel Approximation by Stephen Hill
// http://blog.selfshadow.com/publications/s2016-advances/s2016_ltc_fresnel.pdf
const fresnel = specularColor.mul( t2.x ).add( specularColor.oneMinus().mul( t2.y ) ).toVar();
reflectedLight.directSpecular.addAssign( lightColor.mul( fresnel ).mul( LTC_Evaluate( { N, V, P, mInv, p0, p1, p2, p3 } ) ) );
reflectedLight.directDiffuse.addAssign( lightColor.mul( diffuseColor ).mul( LTC_Evaluate( { N, V, P, mInv: mat3( 1, 0, 0, 0, 1, 0, 0, 0, 1 ), p0, p1, p2, p3 } ) ) );
}
/**
* Implements the indirect lighting.
*
* @param {ContextNode} context - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
indirect( context, stack, builder ) {
// Order matters: ambientOcclusion() attenuates the indirect terms that
// the two preceding calls have accumulated into reflectedLight.
this.indirectDiffuse( context, stack, builder );
this.indirectSpecular( context, stack, builder );
this.ambientOcclusion( context, stack, builder );
}
/**
* Implements the indirect diffuse term.
*
* @param {ContextNode} input - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
indirectDiffuse( { irradiance, reflectedLight } ) {
// Lambertian response to the accumulated (IBL + light probe) irradiance.
reflectedLight.indirectDiffuse.addAssign( irradiance.mul( BRDF_Lambert( { diffuseColor } ) ) );
}
/**
* Implements the indirect specular term.
*
* @param {ContextNode} input - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
indirectSpecular( { radiance, iblIrradiance, reflectedLight } ) {
if ( this.sheen === true ) {
// Sheen IBL term; mul() here multiplies both the sheen color and the BRDF in.
this.sheenSpecularIndirect.addAssign( iblIrradiance.mul(
sheen,
IBLSheenBRDF( {
normal: transformedNormalView,
viewDir: positionViewDirection,
roughness: sheenRoughness
} )
) );
}
if ( this.clearcoat === true ) {
const dotNVcc = transformedClearcoatNormalView.dot( positionViewDirection ).clamp();
const clearcoatEnv = EnvironmentBRDF( {
dotNV: dotNVcc,
specularColor: clearcoatF0,
specularF90: clearcoatF90,
roughness: clearcoatRoughness
} );
this.clearcoatSpecularIndirect.addAssign( this.clearcoatRadiance.mul( clearcoatEnv ) );
}
// Both indirect specular and indirect diffuse light accumulate here
const singleScattering = vec3().toVar( 'singleScattering' );
const multiScattering = vec3().toVar( 'multiScattering' );
const cosineWeightedIrradiance = iblIrradiance.mul( 1 / Math.PI );
// Multiple-scattering energy compensation: splits the environment response
// into single- and multi-scatter contributions (filled in by computeMultiscattering).
this.computeMultiscattering( singleScattering, multiScattering, specularF90 );
// Whatever energy is not scattered as specular feeds the diffuse term,
// keeping the total response energy-conserving.
const totalScattering = singleScattering.add( multiScattering );
const diffuse = diffuseColor.mul( totalScattering.r.max( totalScattering.g ).max( totalScattering.b ).oneMinus() );
reflectedLight.indirectSpecular.addAssign( radiance.mul( singleScattering ) );
reflectedLight.indirectSpecular.addAssign( multiScattering.mul( cosineWeightedIrradiance ) );
reflectedLight.indirectDiffuse.addAssign( diffuse.mul( cosineWeightedIrradiance ) );
}
/**
* Implements the ambient occlusion term.
*
* @param {ContextNode} input - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
ambientOcclusion( { ambientOcclusion, reflectedLight } ) {
const dotNV = transformedNormalView.dot( positionViewDirection ).clamp(); // @ TODO: Move to core dotNV
const aoNV = dotNV.add( ambientOcclusion );
// aoExp = exp2( -( 16 * roughness + 1 ) ): rougher surfaces receive stronger
// specular occlusion.
const aoExp = roughness.mul( - 16.0 ).oneMinus().negate().exp2();
// Specular-occlusion approximation: saturate( pow( dotNV + ao, aoExp ) - 1 + ao ).
const aoNode = ambientOcclusion.sub( aoNV.pow( aoExp ).oneMinus() ).clamp();
if ( this.clearcoat === true ) {
this.clearcoatSpecularIndirect.mulAssign( ambientOcclusion );
}
if ( this.sheen === true ) {
this.sheenSpecularIndirect.mulAssign( ambientOcclusion );
}
// Diffuse is occluded directly; specular uses the view/roughness-aware term.
reflectedLight.indirectDiffuse.mulAssign( ambientOcclusion );
reflectedLight.indirectSpecular.mulAssign( aoNode );
}
/**
* Used for final lighting accumulations depending on the requested features.
*
* @param {ContextNode} context - The current node context.
* @param {StackNode} stack - The current stack.
* @param {NodeBuilder} builder - The current node builder.
*/
finish( context ) {
const { outgoingLight } = context;
if ( this.clearcoat === true ) {
const dotNVcc = transformedClearcoatNormalView.dot( positionViewDirection ).clamp();
const Fcc = F_Schlick( {
dotVH: dotNVcc,
f0: clearcoatF0,
f90: clearcoatF90
} );
// Energy-conserving layering: the base layer is attenuated by the clearcoat
// Fresnel before the clearcoat lobes are added on top, scaled by coverage.
const clearcoatLight = outgoingLight.mul( clearcoat.mul( Fcc ).oneMinus() ).add( this.clearcoatSpecularDirect.add( this.clearcoatSpecularIndirect ).mul( clearcoat ) );
outgoingLight.assign( clearcoatLight );
}
if ( this.sheen === true ) {
// Sheen albedo-scaling approximation; 0.157 is an empirical fit constant.
const sheenEnergyComp = sheen.r.max( sheen.g ).max( sheen.b ).mul( 0.157 ).oneMinus();
const sheenLight = outgoingLight.mul( sheenEnergyComp ).add( this.sheenSpecularDirect, this.sheenSpecularIndirect );
outgoingLight.assign( sheenLight );
}
}
}
// These defines must match with PMREMGenerator
// (r, m) pairs: roughness breakpoints and the mip offsets they map to;
// roughnessToMip() interpolates linearly between neighboring pairs.
const cubeUV_r0 = /*@__PURE__*/ float( 1.0 );
const cubeUV_m0 = /*@__PURE__*/ float( - 2.0 );
const cubeUV_r1 = /*@__PURE__*/ float( 0.8 );
const cubeUV_m1 = /*@__PURE__*/ float( - 1.0 );
const cubeUV_r4 = /*@__PURE__*/ float( 0.4 );
const cubeUV_m4 = /*@__PURE__*/ float( 2.0 );
const cubeUV_r5 = /*@__PURE__*/ float( 0.305 );
const cubeUV_m5 = /*@__PURE__*/ float( 3.0 );
const cubeUV_r6 = /*@__PURE__*/ float( 0.21 );
const cubeUV_m6 = /*@__PURE__*/ float( 4.0 );
// Smallest mip level / tile size stored in the cubeUV atlas.
const cubeUV_minMipLevel = /*@__PURE__*/ float( 4.0 );
const cubeUV_minTileSize = /*@__PURE__*/ float( 16.0 );
// These shader functions convert between the UV coordinates of a single face of
// a cubemap, the 0-5 integer index of a cube face, and the direction vector for
// sampling a textureCube (not generally normalized).
// Returns the face index for a direction: 0/1/2 = +x/+y/+z, 3/4/5 = -x/-y/-z,
// chosen by the dominant-magnitude axis.
const getFace = /*@__PURE__*/ Fn( ( [ direction ] ) => {
const absDirection = vec3( abs( direction ) ).toVar();
const face = float( - 1.0 ).toVar();
If( absDirection.x.greaterThan( absDirection.z ), () => {
If( absDirection.x.greaterThan( absDirection.y ), () => {
// x dominates: +x -> 0, -x -> 3
face.assign( select( direction.x.greaterThan( 0.0 ), 0.0, 3.0 ) );
} ).Else( () => {
// y dominates: +y -> 1, -y -> 4
face.assign( select( direction.y.greaterThan( 0.0 ), 1.0, 4.0 ) );
} );
} ).Else( () => {
If( absDirection.z.greaterThan( absDirection.y ), () => {
// z dominates: +z -> 2, -z -> 5
face.assign( select( direction.z.greaterThan( 0.0 ), 2.0, 5.0 ) );
} ).Else( () => {
face.assign( select( direction.y.greaterThan( 0.0 ), 1.0, 4.0 ) );
} );
} );
return face;
} ).setLayout( {
name: 'getFace',
type: 'float',
inputs: [
{ name: 'direction', type: 'vec3' }
]
} );
// RH coordinate system; PMREM face-indexing convention
// Projects a direction onto the given cube face and remaps the result
// from [-1, 1] to [0, 1] face UV coordinates.
const getUV = /*@__PURE__*/ Fn( ( [ direction, face ] ) => {
const uv = vec2().toVar();
If( face.equal( 0.0 ), () => {
uv.assign( vec2( direction.z, direction.y ).div( abs( direction.x ) ) ); // pos x
} ).ElseIf( face.equal( 1.0 ), () => {
uv.assign( vec2( direction.x.negate(), direction.z.negate() ).div( abs( direction.y ) ) ); // pos y
} ).ElseIf( face.equal( 2.0 ), () => {
uv.assign( vec2( direction.x.negate(), direction.y ).div( abs( direction.z ) ) ); // pos z
} ).ElseIf( face.equal( 3.0 ), () => {
uv.assign( vec2( direction.z.negate(), direction.y ).div( abs( direction.x ) ) ); // neg x
} ).ElseIf( face.equal( 4.0 ), () => {
uv.assign( vec2( direction.x.negate(), direction.z ).div( abs( direction.y ) ) ); // neg y
} ).Else( () => {
uv.assign( vec2( direction.x, direction.y ).div( abs( direction.z ) ) ); // neg z
} );
// Remap [-1, 1] -> [0, 1].
return mul( 0.5, uv.add( 1.0 ) );
} ).setLayout( {
name: 'getUV',
type: 'vec2',
inputs: [
{ name: 'direction', type: 'vec3' },
{ name: 'face', type: 'float' }
]
} );
// Maps perceptual roughness to a (possibly fractional) cubeUV mip level by
// interpolating linearly between the (cubeUV_r*, cubeUV_m*) breakpoints above;
// below the last breakpoint an analytic log2 falloff is used.
const roughnessToMip = /*@__PURE__*/ Fn( ( [ roughness ] ) => {
const mip = float( 0.0 ).toVar();
If( roughness.greaterThanEqual( cubeUV_r1 ), () => {
mip.assign( cubeUV_r0.sub( roughness ).mul( cubeUV_m1.sub( cubeUV_m0 ) ).div( cubeUV_r0.sub( cubeUV_r1 ) ).add( cubeUV_m0 ) );
} ).ElseIf( roughness.greaterThanEqual( cubeUV_r4 ), () => {
mip.assign( cubeUV_r1.sub( roughness ).mul( cubeUV_m4.sub( cubeUV_m1 ) ).div( cubeUV_r1.sub( cubeUV_r4 ) ).add( cubeUV_m1 ) );
} ).ElseIf( roughness.greaterThanEqual( cubeUV_r5 ), () => {
mip.assign( cubeUV_r4.sub( roughness ).mul( cubeUV_m5.sub( cubeUV_m4 ) ).div( cubeUV_r4.sub( cubeUV_r5 ) ).add( cubeUV_m4 ) );
} ).ElseIf( roughness.greaterThanEqual( cubeUV_r6 ), () => {
mip.assign( cubeUV_r5.sub( roughness ).mul( cubeUV_m6.sub( cubeUV_m5 ) ).div( cubeUV_r5.sub( cubeUV_r6 ) ).add( cubeUV_m5 ) );
} ).Else( () => {
mip.assign( float( - 2.0 ).mul( log2( mul( 1.16, roughness ) ) ) ); // 1.16 = 1.79^0.25
} );
return mip;
} ).setLayout( {
name: 'roughnessToMip',
type: 'float',
inputs: [
{ name: 'roughness', type: 'float' }
]
} );
// RH coordinate system; PMREM face-indexing convention
// Inverse of getUV(): converts a [0, 1] face UV plus face index back into an
// (unnormalized) sampling direction.
const getDirection = /*@__PURE__*/ Fn( ( [ uv_immutable, face ] ) => {
const uv = uv_immutable.toVar();
// Remap [0, 1] -> [-1, 1].
uv.assign( mul( 2.0, uv ).sub( 1.0 ) );
const direction = vec3( uv, 1.0 ).toVar();
If( face.equal( 0.0 ), () => {
direction.assign( direction.zyx ); // ( 1, v, u ) pos x
} ).ElseIf( face.equal( 1.0 ), () => {
direction.assign( direction.xzy );
direction.xz.mulAssign( - 1.0 ); // ( -u, 1, -v ) pos y
} ).ElseIf( face.equal( 2.0 ), () => {
direction.x.mulAssign( - 1.0 ); // ( -u, v, 1 ) pos z
} ).ElseIf( face.equal( 3.0 ), () => {
direction.assign( direction.zyx );
direction.xz.mulAssign( - 1.0 ); // ( -1, v, -u ) neg x
} ).ElseIf( face.equal( 4.0 ), () => {
direction.assign( direction.xzy );
direction.xy.mulAssign( - 1.0 ); // ( -u, -1, v ) neg y
} ).ElseIf( face.equal( 5.0 ), () => {
direction.z.mulAssign( - 1.0 ); // ( u, v, -1 ) neg z
} );
return direction;
} ).setLayout( {
name: 'getDirection',
type: 'vec3',
inputs: [
{ name: 'uv', type: 'vec2' },
{ name: 'face', type: 'float' }
]
} );
// Samples the cubeUV atlas with trilinear filtering: the roughness selects a
// fractional mip level and, when fractional, two adjacent mip samples are blended.
const textureCubeUV = /*@__PURE__*/ Fn( ( [ envMap, sampleDir_immutable, roughness_immutable, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP ] ) => {
const roughness = float( roughness_immutable );
const sampleDir = vec3( sampleDir_immutable );
const mip = clamp( roughnessToMip( roughness ), cubeUV_m0, CUBEUV_MAX_MIP );
const mipF = fract( mip );
const mipInt = floor( mip );
const color0 = vec3( bilinearCubeUV( envMap, sampleDir, mipInt, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP ) ).toVar();
If( mipF.notEqual( 0.0 ), () => {
// Blend with the next mip level for smooth roughness transitions.
const color1 = vec3( bilinearCubeUV( envMap, sampleDir, mipInt.add( 1.0 ), CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP ) ).toVar();
color0.assign( mix( color0, color1, mipF ) );
} );
return color0;
} );
// Computes the atlas UV for a direction at a given integer mip level and samples
// the packed cubeUV texture (faces laid out 3 across, two rows per mip).
const bilinearCubeUV = /*@__PURE__*/ Fn( ( [ envMap, direction_immutable, mipInt_immutable, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP ] ) => {
const mipInt = float( mipInt_immutable ).toVar();
const direction = vec3( direction_immutable );
const face = float( getFace( direction ) ).toVar();
// Mips below cubeUV_minMipLevel live in the extra-filtered strip; filterInt
// selects which strip, while mipInt is clamped to the smallest stored mip.
const filterInt = float( max$1( cubeUV_minMipLevel.sub( mipInt ), 0.0 ) ).toVar();
mipInt.assign( max$1( mipInt, cubeUV_minMipLevel ) );
const faceSize = float( exp2( mipInt ) ).toVar();
// Inset by one texel so bilinear filtering never bleeds across face borders.
const uv = vec2( getUV( direction, face ).mul( faceSize.sub( 2.0 ) ).add( 1.0 ) ).toVar();
If( face.greaterThan( 2.0 ), () => {
// Negative faces occupy the second row of the 3x2 face layout.
uv.y.addAssign( faceSize );
face.subAssign( 3.0 );
} );
uv.x.addAssign( face.mul( faceSize ) );
uv.x.addAssign( filterInt.mul( mul( 3.0, cubeUV_minTileSize ) ) );
uv.y.addAssign( mul( 4.0, exp2( CUBEUV_MAX_MIP ).sub( faceSize ) ) );
uv.x.mulAssign( CUBEUV_TEXEL_WIDTH );
uv.y.mulAssign( CUBEUV_TEXEL_HEIGHT );
return envMap.sample( uv ).grad( vec2(), vec2() ); // disable anisotropic filtering
} );
// Samples the environment in a direction obtained by rotating outputDirection
// around `axis` by `theta`, used by blur() to gather weighted taps.
const getSample = /*@__PURE__*/ Fn( ( { envMap, mipInt, outputDirection, theta, axis, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP } ) => {
const cosTheta = cos( theta );
// Rodrigues' axis-angle rotation
const sampleDirection = outputDirection.mul( cosTheta )
.add( axis.cross( outputDirection ).mul( sin( theta ) ) )
.add( axis.mul( axis.dot( outputDirection ).mul( cosTheta.oneMinus() ) ) );
return bilinearCubeUV( envMap, sampleDirection, mipInt, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP );
} );
// Separable PMREM blur pass: accumulates symmetric weighted samples around the
// output direction, either latitudinally (around poleAxis) or longitudinally.
const blur = /*@__PURE__*/ Fn( ( { n, latitudinal, poleAxis, outputDirection, weights, samples, dTheta, mipInt, envMap, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP } ) => {
const axis = vec3( select( latitudinal, poleAxis, cross( poleAxis, outputDirection ) ) ).toVar();
If( all( axis.equals( vec3( 0.0 ) ) ), () => {
// Degenerate case (direction parallel to poleAxis): pick a perpendicular axis.
axis.assign( vec3( outputDirection.z, 0.0, outputDirection.x.negate() ) );
} );
axis.assign( normalize( axis ) );
// Name mirrors the original GLSL PMREM shader output variable.
const gl_FragColor = vec3().toVar();
gl_FragColor.addAssign( weights.element( 0 ).mul( getSample( { theta: 0.0, axis, outputDirection, mipInt, envMap, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP } ) ) );
// `n` is the static loop bound; `samples` is the dynamic tap count.
Loop( { start: int( 1 ), end: n }, ( { i } ) => {
If( i.greaterThanEqual( samples ), () => {
Break();
} );
const theta = float( dTheta.mul( float( i ) ) ).toVar();
// Symmetric taps at -theta and +theta share the same weight.
gl_FragColor.addAssign( weights.element( i ).mul( getSample( { theta: theta.mul( - 1.0 ), axis, outputDirection, mipInt, envMap, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP } ) ) );
gl_FragColor.addAssign( weights.element( i ).mul( getSample( { theta, axis, outputDirection, mipInt, envMap, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP } ) ) );
} );
return vec4( gl_FragColor, 1 );
} );
/** @module PMREMNode **/
// Module-wide PMREM generator (created lazily in PMREMNode#setup) and a cache
// mapping each input texture to its generated PMREM render target.
let _generator = null;
const _cache = new WeakMap();
/**
 * Derives the cubeUV atlas texel sizes and maximum mip level for a
 * PMREM texture of the given height.
 *
 * @private
 * @param {Number} imageHeight - The image height.
 * @return {{texelWidth: Number,texelHeight: Number, maxMip: Number}} The result object.
 */
function _generateCubeUVSize( imageHeight ) {
	const maxMip = Math.log2( imageHeight ) - 2;
	// The atlas is three faces wide; its width never drops below the
	// 7 * 16 texel filter strip.
	const atlasFaceWidth = Math.max( 2 ** maxMip, 7 * 16 );
	return {
		texelWidth: 1.0 / ( 3 * atlasFaceWidth ),
		texelHeight: 1.0 / imageHeight,
		maxMip
	};
}
/**
 * Generates (or reuses) a PMREM for the given texture.
 *
 * Results are cached per texture; regeneration only happens when the
 * texture's `pmremVersion` no longer matches the cached one.
 *
 * @private
 * @param {Texture} texture - The texture to create the PMREM for.
 * @return {Texture} The PMREM, or `null` if the source image is not ready yet.
 */
function _getPMREMFromTexture( texture ) {
	let cacheTexture = _cache.get( texture );
	const cachedVersion = ( cacheTexture !== undefined ) ? cacheTexture.pmremVersion : - 1;
	if ( cachedVersion !== texture.pmremVersion ) {
		const image = texture.image;
		if ( texture.isCubeTexture ) {
			// All six faces must be loaded before a cube map can be processed.
			if ( isCubeMapReady( image ) === false ) return null;
			cacheTexture = _generator.fromCubemap( texture, cacheTexture );
		} else {
			if ( isEquirectangularMapReady( image ) === false ) return null;
			cacheTexture = _generator.fromEquirectangular( texture, cacheTexture );
		}
		cacheTexture.pmremVersion = texture.pmremVersion;
		_cache.set( texture, cacheTexture );
	}
	return cacheTexture.texture;
}
/**
* This node represents a PMREM which is a special type of preprocessed
* environment map intended for PBR materials.
*
* ```js
* const material = new MeshStandardNodeMaterial();
* material.envNode = pmremTexture( envMap );
* ```
*
* @augments TempNode
*/
class PMREMNode extends TempNode {
static get type() {
return 'PMREMNode';
}
/**
* Constructs a new PMREM node.
*
* @param {Texture} value - The input texture.
* @param {Node<vec2>} [uvNode=null] - The uv node.
* @param {Node<float>} [levelNode=null] - The level node.
*/
constructor( value, uvNode = null, levelNode = null ) {
super( 'vec3' );
/**
* Reference to the input texture.
*
* @private
* @type {Texture}
*/
this._value = value;
/**
* Reference to the generated PMREM.
*
* @private
* @type {Texture | null}
* @default null
*/
this._pmrem = null;
/**
* The uv node.
*
* @type {Node<vec2>}
*/
this.uvNode = uvNode;
/**
* The level node.
*
* @type {Node<float>}
*/
this.levelNode = levelNode;
/**
* Reference to a PMREM generator.
*
* @private
* @type {PMREMGenerator}
* @default null
*/
this._generator = null;
const defaultTexture = new Texture();
defaultTexture.isRenderTargetTexture = true;
/**
* The texture node holding the generated PMREM. It starts with a
* placeholder texture until the real PMREM is available.
*
* @private
* @type {TextureNode}
*/
this._texture = texture( defaultTexture );
/**
* A uniform representing the PMREM's width.
*
* @private
* @type {UniformNode<float>}
*/
this._width = uniform( 0 );
/**
* A uniform representing the PMREM's height.
*
* @private
* @type {UniformNode<float>}
*/
this._height = uniform( 0 );
/**
* A uniform representing the PMREM's max Mip.
*
* @private
* @type {UniformNode<float>}
*/
this._maxMip = uniform( 0 );
/**
* The `updateBeforeType` is set to `NodeUpdateType.RENDER`.
*
* @type {String}
* @default 'render'
*/
this.updateBeforeType = NodeUpdateType.RENDER;
}
/**
* Sets the input texture and invalidates the cached PMREM so it is
* regenerated on the next update.
*
* @param {Texture} value - The new input texture.
*/
set value( value ) {
this._value = value;
this._pmrem = null;
}
/**
* The node's texture value.
*
* @type {Texture}
*/
get value() {
return this._value;
}
/**
* Uses the given PMREM texture to update internal values.
*
* @param {Texture} texture - The PMREM texture.
*/
updateFromTexture( texture ) {
const cubeUVSize = _generateCubeUVSize( texture.image.height );
this._texture.value = texture;
this._width.value = cubeUVSize.texelWidth;
this._height.value = cubeUVSize.texelHeight;
this._maxMip.value = cubeUVSize.maxMip;
}
// Runs once per render: (re)generates the PMREM when the input texture's
// pmremVersion has changed since the last update.
updateBefore() {
let pmrem = this._pmrem;
const pmremVersion = pmrem ? pmrem.pmremVersion : - 1;
const texture = this._value;
if ( pmremVersion !== texture.pmremVersion ) {
if ( texture.isPMREMTexture === true ) {
// The input already is a PMREM; use it directly.
pmrem = texture;
} else {
pmrem = _getPMREMFromTexture( texture );
}
// pmrem may be null when the source image is not fully loaded yet;
// in that case keep the previous state and retry on the next render.
if ( pmrem !== null ) {
this._pmrem = pmrem;
this.updateFromTexture( pmrem );
}
}
}
setup( builder ) {
// The PMREM generator is created lazily and shared module-wide.
if ( _generator === null ) {
_generator = builder.createPMREMGenerator();
}
//
this.updateBefore( builder );
//
let uvNode = this.uvNode;
if ( uvNode === null && builder.context.getUV ) {
uvNode = builder.context.getUV( this );
}
//
const texture = this.value;
// NOTE(review): these component flips appear to reconcile WebGL render-target
// orientation with the PMREM layout — confirm against PMREMGenerator conventions.
if ( builder.renderer.coordinateSystem === WebGLCoordinateSystem && texture.isPMREMTexture !== true && texture.isRenderTargetTexture === true ) {
uvNode = vec3( uvNode.x.negate(), uvNode.yz );
}
uvNode = vec3( uvNode.x, uvNode.y.negate(), uvNode.z );
//
let levelNode = this.levelNode;
if ( levelNode === null && builder.context.getTextureLevel ) {
levelNode = builder.context.getTextureLevel( this );
}
//
return textureCubeUV( this._texture, uvNode, levelNode, this._width, this._height, this._maxMip );
}
}
/**
 * Returns `true` if the given cube map image has been fully loaded,
 * i.e. all six face entries are defined.
 *
 * @private
 * @param {Array<(Image|Object)>} image - The cube map image.
 * @return {Boolean} Whether the given cube map is ready or not.
 */
function isCubeMapReady( image ) {
	if ( image === null || image === undefined ) return false;
	for ( let face = 0; face < 6; face ++ ) {
		if ( image[ face ] === undefined ) return false;
	}
	return true;
}
/**
 * Returns `true` if the given equirectangular image has been fully loaded,
 * i.e. it exists and reports a positive height.
 *
 * @private
 * @param {(Image|Object)} image - The equirectangular image.
 * @return {Boolean} Whether the given equirectangular map is ready or not.
 */
function isEquirectangularMapReady( image ) {
	if ( image === null || image === undefined ) return false;
	return image.height > 0;
}
/**
* TSL function for creating a PMREM node.
*
* @function
* @param {Texture} value - The input texture.
* @param {Node<vec2>} [uvNode=null] - The uv node.
* @param {Node<float>} [levelNode=null] - The level node.
* @returns {PMREMNode}
*/
const pmremTexture = /*@__PURE__*/ nodeProxy( PMREMNode );
// Maps an environment texture to its cached PMREM-based environment node
// (see EnvironmentNode#setup).
const _envNodeCache = new WeakMap();
/**
* Represents a physical model for Image-based lighting (IBL). The environment
* is defined via environment maps in the equirectangular, cube map or cubeUV (PMREM) format.
* `EnvironmentNode` is intended for PBR materials like {@link MeshStandardNodeMaterial}.
*
* @augments LightingNode
*/
class EnvironmentNode extends LightingNode {
static get type() {
return 'EnvironmentNode';
}
/**
* Constructs a new environment node.
*
* @param {Node} [envNode=null] - A node representing the environment.
*/
constructor( envNode = null ) {
super();
/**
* A node representing the environment.
*
* @type {Node?}
* @default null
*/
this.envNode = envNode;
}
// Builds the radiance/irradiance contributions of the environment and adds
// them to the builder's lighting context.
setup( builder ) {
const { material } = builder;
let envNode = this.envNode;
// Raw texture inputs are wrapped in a PMREM node; the result is cached
// per texture value so multiple materials share one PMREM.
if ( envNode.isTextureNode || envNode.isMaterialReferenceNode ) {
const value = ( envNode.isTextureNode ) ? envNode.value : material[ envNode.property ];
let cacheEnvNode = _envNodeCache.get( value );
if ( cacheEnvNode === undefined ) {
cacheEnvNode = pmremTexture( value );
_envNodeCache.set( value, cacheEnvNode );
}
envNode = cacheEnvNode;
}
//
const envMap = material.envMap;
const intensity = envMap ? reference( 'envMapIntensity', 'float', builder.material ) : reference( 'environmentIntensity', 'float', builder.scene ); // @TODO: Add materialEnvIntensity in MaterialNode
// With anisotropy the radiance lookup uses the bent normal instead.
const useAnisotropy = material.useAnisotropy === true || material.anisotropy > 0;
const radianceNormalView = useAnisotropy ? transformedBentNormalView : transformedNormalView;
const radiance = envNode.context( createRadianceContext( roughness, radianceNormalView ) ).mul( intensity );
// Irradiance is scaled by PI — presumably to cancel the 1/PI in the Lambert BRDF; confirm against BRDF_Lambert.
const irradiance = envNode.context( createIrradianceContext( transformedNormalWorld ) ).mul( Math.PI ).mul( intensity );
// cache() isolates the sub-graphs so they are evaluated once.
const isolateRadiance = cache( radiance );
const isolateIrradiance = cache( irradiance );
//
builder.context.radiance.addAssign( isolateRadiance );
builder.context.iblIrradiance.addAssign( isolateIrradiance );
//
// Clearcoat gets its own radiance lookup with the clearcoat normal/roughness.
const clearcoatRadiance = builder.context.lightingModel.clearcoatRadiance;
if ( clearcoatRadiance ) {
const clearcoatRadianceContext = envNode.context( createRadianceContext( clearcoatRoughness, transformedClearcoatNormalView ) ).mul( intensity );
const isolateClearcoatRadiance = cache( clearcoatRadianceContext );
clearcoatRadiance.addAssign( isolateClearcoatRadiance );
}
}
}
// Builds a lighting context whose UV is the (lazily computed) world-space
// reflection vector and whose texture level is the given roughness.
const createRadianceContext = ( roughnessNode, normalViewNode ) => {
	let cachedReflectVec = null;
	const computeReflectVec = () => {
		let r = positionViewDirection.negate().reflect( normalViewNode );
		// Mixing the reflection with the normal is more accurate and keeps rough objects from gathering light from behind their tangent plane.
		r = roughnessNode.mul( roughnessNode ).mix( r, normalViewNode ).normalize();
		return r.transformDirection( cameraViewMatrix );
	};
	return {
		getUV: () => {
			if ( cachedReflectVec === null ) cachedReflectVec = computeReflectVec();
			return cachedReflectVec;
		},
		getTextureLevel: () => roughnessNode
	};
};
// Builds a lighting context that samples the environment along the world
// normal at the most pre-filtered texture level.
const createIrradianceContext = ( normalWorldNode ) => ( {
	getUV: () => normalWorldNode,
	getTextureLevel: () => float( 1.0 )
} );
// Shared template providing the default property values for MeshStandardNodeMaterial.
const _defaultValues$6 = /*@__PURE__*/ new MeshStandardMaterial();
/**
* Node material version of `MeshStandardMaterial`.
*
* @augments NodeMaterial
*/
class MeshStandardNodeMaterial extends NodeMaterial {
static get type() {
return 'MeshStandardNodeMaterial';
}
/**
* Constructs a new mesh standard node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isMeshStandardNodeMaterial = true;
/**
* Set to `true` because standard materials react on lights.
*
* @type {Boolean}
* @default true
*/
this.lights = true;
/**
* The emissive color of standard materials is by default inferred from the `emissive`,
* `emissiveIntensity` and `emissiveMap` properties. This node property allows to
* overwrite the default and define the emissive color with a node instead.
*
* If you don't want to overwrite the emissive color but modify the existing
* value instead, use {@link module:MaterialNode.materialEmissive}.
*
* @type {Node<vec3>?}
* @default null
*/
this.emissiveNode = null;
/**
* The metalness of standard materials is by default inferred from the `metalness`,
* and `metalnessMap` properties. This node property allows to
* overwrite the default and define the metalness with a node instead.
*
* If you don't want to overwrite the metalness but modify the existing
* value instead, use {@link module:MaterialNode.materialMetalness}.
*
* @type {Node<float>?}
* @default null
*/
this.metalnessNode = null;
/**
* The roughness of standard materials is by default inferred from the `roughness`,
* and `roughnessMap` properties. This node property allows to
* overwrite the default and define the roughness with a node instead.
*
* If you don't want to overwrite the roughness but modify the existing
* value instead, use {@link module:MaterialNode.materialRoughness}.
*
* @type {Node<float>?}
* @default null
*/
this.roughnessNode = null;
this.setDefaultValues( _defaultValues$6 );
this.setValues( parameters );
}
/**
* Overwritten since this type of material uses {@link EnvironmentNode}
* to implement the PBR (PMREM based) environment mapping. Besides, the
* method honors `Scene.environment`.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {EnvironmentNode<vec3>?} The environment node.
*/
setupEnvironment( builder ) {
let envNode = super.setupEnvironment( builder );
// Fall back to the scene-level environment when the material defines none.
if ( envNode === null && builder.environmentNode ) {
envNode = builder.environmentNode;
}
return envNode ? new EnvironmentNode( envNode ) : null;
}
/**
* Setups the lighting model.
*
* @return {PhysicalLightingModel} The lighting model.
*/
setupLightingModel( /*builder*/ ) {
return new PhysicalLightingModel();
}
/**
* Setups the specular related node variables.
*/
setupSpecular() {
// 0.04 is the common dielectric F0; metals reflect with their base color instead.
const specularColorNode = mix( vec3( 0.04 ), diffuseColor.rgb, metalness );
specularColor.assign( specularColorNode );
specularF90.assign( 1.0 );
}
/**
* Setups the standard specific node variables.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setupVariants() {
// METALNESS
const metalnessNode = this.metalnessNode ? float( this.metalnessNode ) : materialMetalness;
metalness.assign( metalnessNode );
// ROUGHNESS
let roughnessNode = this.roughnessNode ? float( this.roughnessNode ) : materialRoughness;
roughnessNode = getRoughness( { roughness: roughnessNode } );
roughness.assign( roughnessNode );
// SPECULAR COLOR
this.setupSpecular();
// DIFFUSE COLOR
// Metals have no diffuse component, so the base color is scaled by (1 - metalness).
diffuseColor.assign( vec4( diffuseColor.rgb.mul( metalnessNode.oneMinus() ), diffuseColor.a ) );
}
// Copies the node overrides in addition to the base material properties.
copy( source ) {
this.emissiveNode = source.emissiveNode;
this.metalnessNode = source.metalnessNode;
this.roughnessNode = source.roughnessNode;
return super.copy( source );
}
}
// Shared template providing the default property values for MeshPhysicalNodeMaterial.
const _defaultValues$5 = /*@__PURE__*/ new MeshPhysicalMaterial();
/**
* Node material version of `MeshPhysicalMaterial`.
*
* @augments MeshStandardNodeMaterial
*/
class MeshPhysicalNodeMaterial extends MeshStandardNodeMaterial {
static get type() {
return 'MeshPhysicalNodeMaterial';
}
/**
* Constructs a new mesh physical node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isMeshPhysicalNodeMaterial = true;
/**
* The clearcoat of physical materials is by default inferred from the `clearcoat`
* and `clearcoatMap` properties. This node property allows to overwrite the default
* and define the clearcoat with a node instead.
*
* If you don't want to overwrite the clearcoat but modify the existing
* value instead, use {@link module:MaterialNode.materialClearcoat}.
*
* @type {Node<float>?}
* @default null
*/
this.clearcoatNode = null;
/**
* The clearcoat roughness of physical materials is by default inferred from the `clearcoatRoughness`
* and `clearcoatRoughnessMap` properties. This node property allows to overwrite the default
* and define the clearcoat roughness with a node instead.
*
* If you don't want to overwrite the clearcoat roughness but modify the existing
* value instead, use {@link module:MaterialNode.materialClearcoatRoughness}.
*
* @type {Node<float>?}
* @default null
*/
this.clearcoatRoughnessNode = null;
/**
* The clearcoat normal of physical materials is by default inferred from the `clearcoatNormalMap`
* property. This node property allows to overwrite the default
* and define the clearcoat normal with a node instead.
*
* If you don't want to overwrite the clearcoat normal but modify the existing
* value instead, use {@link module:MaterialNode.materialClearcoatNormal}.
*
* @type {Node<vec3>?}
* @default null
*/
this.clearcoatNormalNode = null;
/**
* The sheen of physical materials is by default inferred from the `sheen`, `sheenColor`
* and `sheenColorMap` properties. This node property allows to overwrite the default
* and define the sheen with a node instead.
*
* If you don't want to overwrite the sheen but modify the existing
* value instead, use {@link module:MaterialNode.materialSheen}.
*
* @type {Node<vec3>?}
* @default null
*/
this.sheenNode = null;
/**
* The sheen roughness of physical materials is by default inferred from the `sheenRoughness` and
* `sheenRoughnessMap` properties. This node property allows to overwrite the default
* and define the sheen roughness with a node instead.
*
* If you don't want to overwrite the sheen roughness but modify the existing
* value instead, use {@link module:MaterialNode.materialSheenRoughness}.
*
* @type {Node<float>?}
* @default null
*/
this.sheenRoughnessNode = null;
/**
* The iridescence of physical materials is by default inferred from the `iridescence`
* property. This node property allows to overwrite the default
* and define the iridescence with a node instead.
*
* If you don't want to overwrite the iridescence but modify the existing
* value instead, use {@link module:MaterialNode.materialIridescence}.
*
* @type {Node<float>?}
* @default null
*/
this.iridescenceNode = null;
/**
* The iridescence IOR of physical materials is by default inferred from the `iridescenceIOR`
* property. This node property allows to overwrite the default
* and define the iridescence IOR with a node instead.
*
* If you don't want to overwrite the iridescence IOR but modify the existing
* value instead, use {@link module:MaterialNode.materialIridescenceIOR}.
*
* @type {Node<float>?}
* @default null
*/
this.iridescenceIORNode = null;
/**
* The iridescence thickness of physical materials is by default inferred from the `iridescenceThicknessRange`
* and `iridescenceThicknessMap` properties. This node property allows to overwrite the default
* and define the iridescence thickness with a node instead.
*
* If you don't want to overwrite the iridescence thickness but modify the existing
* value instead, use {@link module:MaterialNode.materialIridescenceThickness}.
*
* @type {Node<float>?}
* @default null
*/
this.iridescenceThicknessNode = null;
/**
* The specular intensity of physical materials is by default inferred from the `specularIntensity`
* and `specularIntensityMap` properties. This node property allows to overwrite the default
* and define the specular intensity with a node instead.
*
* If you don't want to overwrite the specular intensity but modify the existing
* value instead, use {@link module:MaterialNode.materialSpecularIntensity}.
*
* @type {Node<float>?}
* @default null
*/
this.specularIntensityNode = null;
/**
* The specular color of physical materials is by default inferred from the `specularColor`
* and `specularColorMap` properties. This node property allows to overwrite the default
* and define the specular color with a node instead.
*
* If you don't want to overwrite the specular color but modify the existing
* value instead, use {@link module:MaterialNode.materialSpecularColor}.
*
* @type {Node<vec3>?}
* @default null
*/
this.specularColorNode = null;
/**
* The ior of physical materials is by default inferred from the `ior`
* property. This node property allows to overwrite the default
* and define the ior with a node instead.
*
* If you don't want to overwrite the ior but modify the existing
* value instead, use {@link module:MaterialNode.materialIOR}.
*
* @type {Node<float>?}
* @default null
*/
this.iorNode = null;
/**
* The transmission of physical materials is by default inferred from the `transmission` and
* `transmissionMap` properties. This node property allows to overwrite the default
* and define the transmission with a node instead.
*
* If you don't want to overwrite the transmission but modify the existing
* value instead, use {@link module:MaterialNode.materialTransmission}.
*
* @type {Node<float>?}
* @default null
*/
this.transmissionNode = null;
/**
* The thickness of physical materials is by default inferred from the `thickness` and
* `thicknessMap` properties. This node property allows to overwrite the default
* and define the thickness with a node instead.
*
* If you don't want to overwrite the thickness but modify the existing
* value instead, use {@link module:MaterialNode.materialThickness}.
*
* @type {Node<float>?}
* @default null
*/
this.thicknessNode = null;
/**
* The attenuation distance of physical materials is by default inferred from the
* `attenuationDistance` property. This node property allows to overwrite the default
* and define the attenuation distance with a node instead.
*
* If you don't want to overwrite the attenuation distance but modify the existing
* value instead, use {@link module:MaterialNode.materialAttenuationDistance}.
*
* @type {Node<float>?}
* @default null
*/
this.attenuationDistanceNode = null;
/**
* The attenuation color of physical materials is by default inferred from the
* `attenuationColor` property. This node property allows to overwrite the default
* and define the attenuation color with a node instead.
*
* If you don't want to overwrite the attenuation color but modify the existing
* value instead, use {@link module:MaterialNode.materialAttenuationColor}.
*
* @type {Node<vec3>?}
* @default null
*/
this.attenuationColorNode = null;
/**
* The dispersion of physical materials is by default inferred from the
* `dispersion` property. This node property allows to overwrite the default
* and define the dispersion with a node instead.
*
* If you don't want to overwrite the dispersion but modify the existing
* value instead, use {@link module:MaterialNode.materialDispersion}.
*
* @type {Node<float>?}
* @default null
*/
this.dispersionNode = null;
/**
* The anisotropy of physical materials is by default inferred from the
* `anisotropy` property. This node property allows to overwrite the default
* and define the anisotropy with a node instead.
*
* If you don't want to overwrite the anisotropy but modify the existing
* value instead, use {@link module:MaterialNode.materialAnisotropy}.
*
* @type {Node<float>?}
* @default null
*/
this.anisotropyNode = null;
this.setDefaultValues( _defaultValues$5 );
this.setValues( parameters );
}
/**
* Whether the lighting model should use clearcoat or not.
*
* @type {Boolean}
* @default true
*/
get useClearcoat() {
return this.clearcoat > 0 || this.clearcoatNode !== null;
}
/**
* Whether the lighting model should use iridescence or not.
*
* @type {Boolean}
* @default true
*/
get useIridescence() {
return this.iridescence > 0 || this.iridescenceNode !== null;
}
/**
* Whether the lighting model should use sheen or not.
*
* @type {Boolean}
* @default true
*/
get useSheen() {
return this.sheen > 0 || this.sheenNode !== null;
}
/**
* Whether the lighting model should use anisotropy or not.
*
* @type {Boolean}
* @default true
*/
get useAnisotropy() {
return this.anisotropy > 0 || this.anisotropyNode !== null;
}
/**
* Whether the lighting model should use transmission or not.
*
* @type {Boolean}
* @default true
*/
get useTransmission() {
return this.transmission > 0 || this.transmissionNode !== null;
}
/**
* Whether the lighting model should use dispersion or not.
*
* @type {Boolean}
* @default true
*/
get useDispersion() {
return this.dispersion > 0 || this.dispersionNode !== null;
}
/**
* Setups the specular related node variables.
*/
setupSpecular() {
const iorNode = this.iorNode ? float( this.iorNode ) : materialIOR;
ior.assign( iorNode );
specularColor.assign( mix( min$1( pow2( ior.sub( 1.0 ).div( ior.add( 1.0 ) ) ).mul( materialSpecularColor ), vec3( 1.0 ) ).mul( materialSpecularIntensity ), diffuseColor.rgb, metalness ) );
specularF90.assign( mix( materialSpecularIntensity, 1.0, metalness ) );
}
/**
* Setups the lighting model.
*
* @return {PhysicalLightingModel} The lighting model.
*/
setupLightingModel( /*builder*/ ) {
return new PhysicalLightingModel( this.useClearcoat, this.useSheen, this.useIridescence, this.useAnisotropy, this.useTransmission, this.useDispersion );
}
/**
* Setups the physical specific node variables. For each enabled feature,
* the node override on the material takes precedence; otherwise the value
* is read from the corresponding material property/map reference.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setupVariants( builder ) {
super.setupVariants( builder );
// CLEARCOAT
if ( this.useClearcoat ) {
const clearcoatNode = this.clearcoatNode ? float( this.clearcoatNode ) : materialClearcoat;
const clearcoatRoughnessNode = this.clearcoatRoughnessNode ? float( this.clearcoatRoughnessNode ) : materialClearcoatRoughness;
clearcoat.assign( clearcoatNode );
// getRoughness() remaps the raw roughness value before it is used by the clearcoat BRDF.
clearcoatRoughness.assign( getRoughness( { roughness: clearcoatRoughnessNode } ) );
}
// SHEEN
if ( this.useSheen ) {
const sheenNode = this.sheenNode ? vec3( this.sheenNode ) : materialSheen;
const sheenRoughnessNode = this.sheenRoughnessNode ? float( this.sheenRoughnessNode ) : materialSheenRoughness;
sheen.assign( sheenNode );
sheenRoughness.assign( sheenRoughnessNode );
}
// IRIDESCENCE
if ( this.useIridescence ) {
const iridescenceNode = this.iridescenceNode ? float( this.iridescenceNode ) : materialIridescence;
const iridescenceIORNode = this.iridescenceIORNode ? float( this.iridescenceIORNode ) : materialIridescenceIOR;
const iridescenceThicknessNode = this.iridescenceThicknessNode ? float( this.iridescenceThicknessNode ) : materialIridescenceThickness;
iridescence.assign( iridescenceNode );
iridescenceIOR.assign( iridescenceIORNode );
iridescenceThickness.assign( iridescenceThicknessNode );
}
// ANISOTROPY
if ( this.useAnisotropy ) {
// The anisotropy vector encodes direction (xy) and strength (length).
const anisotropyV = ( this.anisotropyNode ? vec2( this.anisotropyNode ) : materialAnisotropy ).toVar();
anisotropy.assign( anisotropyV.length() );
If( anisotropy.equal( 0.0 ), () => {
// Degenerate case: fall back to a unit direction to avoid a division by zero below.
anisotropyV.assign( vec2( 1.0, 0.0 ) );
} ).Else( () => {
// Normalize the direction and clamp the strength to [0, 1].
anisotropyV.divAssign( vec2( anisotropy ) );
anisotropy.assign( anisotropy.saturate() );
} );
// Roughness along the anisotropy bitangent is the material roughness, while the tangent roughness increases with anisotropy.
alphaT.assign( anisotropy.pow2().mix( roughness.pow2(), 1.0 ) );
// Rotate the view-space tangent frame by the anisotropy direction.
anisotropyT.assign( TBNViewMatrix[ 0 ].mul( anisotropyV.x ).add( TBNViewMatrix[ 1 ].mul( anisotropyV.y ) ) );
anisotropyB.assign( TBNViewMatrix[ 1 ].mul( anisotropyV.x ).sub( TBNViewMatrix[ 0 ].mul( anisotropyV.y ) ) );
}
// TRANSMISSION
if ( this.useTransmission ) {
const transmissionNode = this.transmissionNode ? float( this.transmissionNode ) : materialTransmission;
const thicknessNode = this.thicknessNode ? float( this.thicknessNode ) : materialThickness;
const attenuationDistanceNode = this.attenuationDistanceNode ? float( this.attenuationDistanceNode ) : materialAttenuationDistance;
const attenuationColorNode = this.attenuationColorNode ? vec3( this.attenuationColorNode ) : materialAttenuationColor;
transmission.assign( transmissionNode );
thickness.assign( thicknessNode );
attenuationDistance.assign( attenuationDistanceNode );
attenuationColor.assign( attenuationColorNode );
// Dispersion only applies when transmission is active.
if ( this.useDispersion ) {
const dispersionNode = this.dispersionNode ? float( this.dispersionNode ) : materialDispersion;
dispersion.assign( dispersionNode );
}
}
}
/**
* Setups the clearcoat normal node.
*
* @return {Node<vec3>} The clearcoat normal.
*/
setupClearcoatNormal() {
return this.clearcoatNormalNode ? vec3( this.clearcoatNormalNode ) : materialClearcoatNormal;
}
setup( builder ) {
builder.context.setupClearcoatNormal = () => this.setupClearcoatNormal( builder );
super.setup( builder );
}
copy( source ) {
this.clearcoatNode = source.clearcoatNode;
this.clearcoatRoughnessNode = source.clearcoatRoughnessNode;
this.clearcoatNormalNode = source.clearcoatNormalNode;
this.sheenNode = source.sheenNode;
this.sheenRoughnessNode = source.sheenRoughnessNode;
this.iridescenceNode = source.iridescenceNode;
this.iridescenceIORNode = source.iridescenceIORNode;
this.iridescenceThicknessNode = source.iridescenceThicknessNode;
this.specularIntensityNode = source.specularIntensityNode;
this.specularColorNode = source.specularColorNode;
this.transmissionNode = source.transmissionNode;
this.thicknessNode = source.thicknessNode;
this.attenuationDistanceNode = source.attenuationDistanceNode;
this.attenuationColorNode = source.attenuationColorNode;
this.dispersionNode = source.dispersionNode;
this.anisotropyNode = source.anisotropyNode;
return super.copy( source );
}
}
/** @module MeshSSSNodeMaterial **/
/**
 * Represents the lighting model for {@link MeshSSSNodeMaterial}. Extends the
 * physical lighting model with an optional subsurface-scattering term.
 *
 * @augments PhysicalLightingModel
 */
class SSSLightingModel extends PhysicalLightingModel {

	/**
	 * Constructs a new SSS lighting model.
	 *
	 * @param {Boolean} [clearcoat=false] - Whether clearcoat is supported or not.
	 * @param {Boolean} [sheen=false] - Whether sheen is supported or not.
	 * @param {Boolean} [iridescence=false] - Whether iridescence is supported or not.
	 * @param {Boolean} [anisotropy=false] - Whether anisotropy is supported or not.
	 * @param {Boolean} [transmission=false] - Whether transmission is supported or not.
	 * @param {Boolean} [dispersion=false] - Whether dispersion is supported or not.
	 * @param {Boolean} [sss=false] - Whether SSS is supported or not.
	 */
	constructor( clearcoat = false, sheen = false, iridescence = false, anisotropy = false, transmission = false, dispersion = false, sss = false ) {

		super( clearcoat, sheen, iridescence, anisotropy, transmission, dispersion );

		/**
		 * Whether the lighting model should use SSS or not.
		 *
		 * @type {Boolean}
		 * @default false
		 */
		this.useSSS = sss;

	}

	/**
	 * Extends the default implementation with a SSS term.
	 *
	 * Reference: [Approximating Translucency for a Fast, Cheap and Convincing Subsurface Scattering Look]{@link https://colinbarrebrisebois.com/2011/03/07/gdc-2011-approximating-translucency-for-a-fast-cheap-and-convincing-subsurface-scattering-look/}
	 *
	 * @param {Object} input - The input data.
	 * @param {StackNode} stack - The current stack.
	 * @param {NodeBuilder} builder - The current node builder.
	 */
	direct( { lightDirection, lightColor, reflectedLight }, stack, builder ) {

		if ( this.useSSS === true ) {

			const { thicknessColorNode, thicknessDistortionNode, thicknessAmbientNode, thicknessAttenuationNode, thicknessPowerNode, thicknessScaleNode } = builder.material;

			// Distort the light direction by the surface normal and measure how well
			// the (negated) view direction lines up with it.
			const distortedLightDir = lightDirection.add( transformedNormalView.mul( thicknessDistortionNode ) ).normalize();
			const viewAlignment = float( positionViewDirection.dot( distortedLightDir.negate() ).saturate().pow( thicknessPowerNode ).mul( thicknessScaleNode ) );

			// Tint by the thickness color and add the ambient term.
			const translucency = vec3( viewAlignment.add( thicknessAmbientNode ).mul( thicknessColorNode ) );

			reflectedLight.directDiffuse.addAssign( translucency.mul( thicknessAttenuationNode.mul( lightColor ) ) );

		}

		super.direct( { lightDirection, lightColor, reflectedLight }, stack, builder );

	}

}
/**
* This node material is an experimental extension of {@link MeshPhysicalNodeMaterial}
* that implements a Subsurface scattering (SSS) term.
*
* @augments MeshPhysicalNodeMaterial
*/
class MeshSSSNodeMaterial extends MeshPhysicalNodeMaterial {

	static get type() {

		return 'MeshSSSNodeMaterial';

	}

	/**
	 * Constructs a new mesh SSS node material.
	 *
	 * @param {Object?} parameters - The configuration parameter.
	 */
	constructor( parameters ) {

		super( parameters );

		/**
		 * Represents the thickness color. SSS is only active
		 * when this node is assigned (see {@link MeshSSSNodeMaterial#useSSS}).
		 *
		 * @type {Node<vec3>?}
		 * @default null
		 */
		this.thicknessColorNode = null;

		/**
		 * Represents the distortion factor.
		 *
		 * @type {Node<float>?}
		 */
		this.thicknessDistortionNode = float( 0.1 );

		/**
		 * Represents the thickness ambient factor.
		 *
		 * @type {Node<float>?}
		 */
		this.thicknessAmbientNode = float( 0.0 );

		/**
		 * Represents the thickness attenuation.
		 *
		 * @type {Node<float>?}
		 */
		this.thicknessAttenuationNode = float( .1 );

		/**
		 * Represents the thickness power.
		 *
		 * @type {Node<float>?}
		 */
		this.thicknessPowerNode = float( 2.0 );

		/**
		 * Represents the thickness scale.
		 *
		 * @type {Node<float>?}
		 */
		this.thicknessScaleNode = float( 10.0 );

	}

	/**
	 * Whether the lighting model should use SSS or not. True once a
	 * thickness color node has been assigned.
	 *
	 * @type {Boolean}
	 */
	get useSSS() {

		return this.thicknessColorNode !== null;

	}

	/**
	 * Setups the lighting model.
	 *
	 * @return {SSSLightingModel} The lighting model.
	 */
	setupLightingModel( /*builder*/ ) {

		return new SSSLightingModel(
			this.useClearcoat,
			this.useSheen,
			this.useIridescence,
			this.useAnisotropy,
			this.useTransmission,
			this.useDispersion,
			this.useSSS
		);

	}

	copy( source ) {

		// Transfer all SSS node overrides from the source material.
		for ( const name of [ 'thicknessColorNode', 'thicknessDistortionNode', 'thicknessAmbientNode', 'thicknessAttenuationNode', 'thicknessPowerNode', 'thicknessScaleNode' ] ) {

			this[ name ] = source[ name ];

		}

		return super.copy( source );

	}

}
// Quantizes the irradiance from N·L. Samples the material's gradient map when
// one is assigned; otherwise falls back to an analytic two-tone ramp.
const getGradientIrradiance = /*@__PURE__*/ Fn( ( { normal, lightDirection, builder } ) => {

	// dotNL ranges from -1.0 to 1.0; remap it into [0, 1] for the lookup.
	const dotNL = normal.dot( lightDirection );
	const coord = vec2( dotNL.mul( 0.5 ).add( 0.5 ), 0.0 );

	if ( builder.material.gradientMap ) {

		const gradientMap = materialReference( 'gradientMap', 'texture' ).context( { getUV: () => coord } );

		return vec3( gradientMap.r );

	}

	// No gradient map: smooth two-tone ramp around 0.7, anti-aliased via fwidth.
	const filterWidth = coord.fwidth().mul( 0.5 );

	return mix( vec3( 0.7 ), vec3( 1.0 ), smoothstep( float( 0.7 ).sub( filterWidth.x ), float( 0.7 ).add( filterWidth.x ), coord.x ) );

} );
/**
 * Represents the lighting model for a toon material. Used in {@link MeshToonNodeMaterial}.
 *
 * @augments LightingModel
 */
class ToonLightingModel extends LightingModel {

	/**
	 * Implements the direct lighting. Instead of using a conventional smooth irradiance, the irradiance is
	 * reduced to a small number of discrete shades to create a comic-like, flat look.
	 *
	 * @param {Object} input - The input data.
	 * @param {StackNode} stack - The current stack.
	 * @param {NodeBuilder} builder - The current node builder.
	 */
	direct( { lightDirection, lightColor, reflectedLight }, stack, builder ) {

		const toonIrradiance = getGradientIrradiance( { normal: normalGeometry, lightDirection, builder } ).mul( lightColor );

		reflectedLight.directDiffuse.addAssign( toonIrradiance.mul( BRDF_Lambert( { diffuseColor: diffuseColor.rgb } ) ) );

	}

	/**
	 * Implements the indirect lighting.
	 *
	 * @param {ContextNode} input - The current node context.
	 * @param {StackNode} stack - The current stack.
	 * @param {NodeBuilder} builder - The current node builder.
	 */
	indirect( { ambientOcclusion, irradiance, reflectedLight } ) {

		reflectedLight.indirectDiffuse.addAssign( irradiance.mul( BRDF_Lambert( { diffuseColor } ) ) );
		reflectedLight.indirectDiffuse.mulAssign( ambientOcclusion );

	}

}
const _defaultValues$4 = /*@__PURE__*/ new MeshToonMaterial();
/**
 * Node material version of `MeshToonMaterial`.
 *
 * @augments NodeMaterial
 */
class MeshToonNodeMaterial extends NodeMaterial {

	static get type() {

		return 'MeshToonNodeMaterial';

	}

	/**
	 * Constructs a new mesh toon node material.
	 *
	 * @param {Object?} parameters - The configuration parameter.
	 */
	constructor( parameters ) {

		super();

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isMeshToonNodeMaterial = true;

		/**
		 * Toon materials react on lights, so lighting is enabled by default.
		 *
		 * @type {Boolean}
		 * @default true
		 */
		this.lights = true;

		this.setDefaultValues( _defaultValues$4 );

		this.setValues( parameters );

	}

	/**
	 * Setups the lighting model.
	 *
	 * @return {ToonLightingModel} The lighting model.
	 */
	setupLightingModel( /*builder*/ ) {

		return new ToonLightingModel();

	}

}
/** @module MatcapUVNode **/
/**
* Can be used to compute texture coordinates for projecting a
* matcap onto a mesh. Used by {@link MeshMatcapNodeMaterial}.
*
* @augments TempNode
*/
class MatcapUVNode extends TempNode {

	static get type() {

		return 'MatcapUVNode';

	}

	/**
	 * Constructs a new matcap uv node.
	 */
	constructor() {

		super( 'vec2' );

	}

	setup() {

		// Build an orthonormal basis around the view direction and project the
		// transformed normal onto it to obtain the matcap texture coordinates.
		const tangent = vec3( positionViewDirection.z, 0, positionViewDirection.x.negate() ).normalize();
		const bitangent = positionViewDirection.cross( tangent );

		// 0.495 instead of 0.5 to remove artifacts caused by undersized matcap disks.
		return vec2( tangent.dot( transformedNormalView ), bitangent.dot( transformedNormalView ) ).mul( 0.495 ).add( 0.5 );

	}

}
/**
 * TSL function for creating a matcap uv node.
 *
 * @function
 * @returns {MatcapUVNode}
 */
const matcapUV = /*@__PURE__*/ nodeImmutable( MatcapUVNode );
const _defaultValues$3 = /*@__PURE__*/ new MeshMatcapMaterial();
/**
 * Node material version of `MeshMatcapMaterial`.
 *
 * @augments NodeMaterial
 */
class MeshMatcapNodeMaterial extends NodeMaterial {

	static get type() {

		return 'MeshMatcapNodeMaterial';

	}

	/**
	 * Constructs a new mesh matcap node material.
	 *
	 * @param {Object?} parameters - The configuration parameter.
	 */
	constructor( parameters ) {

		super();

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isMeshMatcapNodeMaterial = true;

		this.setDefaultValues( _defaultValues$3 );

		this.setValues( parameters );

	}

	/**
	 * Setups the matcap specific node variables.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 */
	setupVariants( builder ) {

		const uv = matcapUV;

		// Sample the matcap texture when present; otherwise fall back to a
		// simple vertical gradient so the mesh stays visible.
		const matcapColor = builder.material.matcap
			? materialReference( 'matcap', 'texture' ).context( { getUV: () => uv } )
			: vec3( mix( 0.2, 0.8, uv.y ) );

		diffuseColor.rgb.mulAssign( matcapColor.rgb );

	}

}
/** @module RotateNode **/
/**
* Applies a rotation to the given position node.
*
* @augments TempNode
*/
class RotateNode extends TempNode {
static get type() {
return 'RotateNode';
}
/**
* Constructs a new rotate node.
*
* @param {Node} positionNode - The position node.
* @param {Node} rotationNode - Represents the rotation that is applied to the position node. Depending
* on whether the position data are 2D or 3D, the rotation is expressed a single float value or an Euler value.
*/
constructor( positionNode, rotationNode ) {
super();
/**
* The position node.
*
* @type {Node}
*/
this.positionNode = positionNode;
/**
* Represents the rotation that is applied to the position node.
* Depending on whether the position data are 2D or 3D, the rotation is expressed a single float value or an Euler value.
*
* @type {Node}
*/
this.rotationNode = rotationNode;
}
/**
* The type of the {@link RotateNode#positionNode} defines the node's type.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The node's type.
*/
getNodeType( builder ) {
return this.positionNode.getNodeType( builder );
}
/**
* Builds the rotation: a single-angle 2x2 matrix for `vec2` positions,
* or three axis rotation matrices (applied as X * Y * Z) for 3D positions.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node} The rotated position.
*/
setup( builder ) {
const { rotationNode, positionNode } = this;
const nodeType = this.getNodeType( builder );
if ( nodeType === 'vec2' ) {
// 2D: rotationNode is a single angle; build a 2x2 rotation matrix from it.
const cosAngle = rotationNode.cos();
const sinAngle = rotationNode.sin();
const rotationMatrix = mat2(
cosAngle, sinAngle,
sinAngle.negate(), cosAngle
);
return rotationMatrix.mul( positionNode );
} else {
// 3D: rotationNode supplies per-axis angles (x, y, z); one 4x4 matrix per axis.
const rotation = rotationNode;
const rotationXMatrix = mat4( vec4( 1.0, 0.0, 0.0, 0.0 ), vec4( 0.0, cos( rotation.x ), sin( rotation.x ).negate(), 0.0 ), vec4( 0.0, sin( rotation.x ), cos( rotation.x ), 0.0 ), vec4( 0.0, 0.0, 0.0, 1.0 ) );
const rotationYMatrix = mat4( vec4( cos( rotation.y ), 0.0, sin( rotation.y ), 0.0 ), vec4( 0.0, 1.0, 0.0, 0.0 ), vec4( sin( rotation.y ).negate(), 0.0, cos( rotation.y ), 0.0 ), vec4( 0.0, 0.0, 0.0, 1.0 ) );
const rotationZMatrix = mat4( vec4( cos( rotation.z ), sin( rotation.z ).negate(), 0.0, 0.0 ), vec4( sin( rotation.z ), cos( rotation.z ), 0.0, 0.0 ), vec4( 0.0, 0.0, 1.0, 0.0 ), vec4( 0.0, 0.0, 0.0, 1.0 ) );
// Promote the position to homogeneous coordinates, rotate, and drop back to vec3.
return rotationXMatrix.mul( rotationYMatrix ).mul( rotationZMatrix ).mul( vec4( positionNode, 1.0 ) ).xyz;
}
}
}
/**
* TSL function for creating a rotate node.
*
* @function
* @param {Node} positionNode - The position node.
* @param {Node} rotationNode - Represents the rotation that is applied to the position node. Depending
* on whether the position data are 2D or 3D, the rotation is expressed a single float value or an Euler value.
* @returns {RotateNode}
*/
const rotate = /*@__PURE__*/ nodeProxy( RotateNode );
const _defaultValues$2 = /*@__PURE__*/ new SpriteMaterial();
/**
* Node material version of `SpriteMaterial`.
*
* @augments NodeMaterial
*/
class SpriteNodeMaterial extends NodeMaterial {
static get type() {
return 'SpriteNodeMaterial';
}
/**
* Constructs a new sprite node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isSpriteNodeMaterial = true;
// Backing field of the `sizeAttenuation` accessor; initialized before
// default values and parameters are applied.
this._useSizeAttenuation = true;
/**
* This property makes it possible to define the position of the sprite with a
* node. That can be useful when the material is used with instanced rendering
* and node data are defined with an instanced attribute node:
* ```js
* const positionAttribute = new InstancedBufferAttribute( new Float32Array( positions ), 3 );
* material.positionNode = instancedBufferAttribute( positionAttribute );
* ```
* Another possibility is to compute the instanced data with a compute shader:
* ```js
* const positionBuffer = instancedArray( particleCount, 'vec3' );
* particleMaterial.positionNode = positionBuffer.toAttribute();
* ```
*
* @type {Node<vec3>?}
* @default null
*/
this.positionNode = null;
/**
* The rotation of sprite materials is by default inferred from the `rotation`
* property. This node property allows to overwrite the default and define
* the rotation with a node instead.
*
* If you don't want to overwrite the rotation but modify the existing
* value instead, use {@link module:MaterialNode.materialRotation}.
*
* @type {Node<float>?}
* @default null
*/
this.rotationNode = null;
/**
* This node property provides an additional way to scale sprites next to
* `Object3D.scale`. The scale transformation based in `Object3D.scale`
* is multiplied with the scale value of this node in the vertex shader.
*
* NOTE(review): the value is converted via `float()` in `setupPositionView()`,
* so a scalar node is expected here despite the declared vec2 type — confirm.
*
* @type {Node<vec2>?}
* @default null
*/
this.scaleNode = null;
this.setDefaultValues( _defaultValues$2 );
this.setValues( parameters );
}
/**
* Setups the position node in view space. This method implements
* the sprite specific vertex shader.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node<vec4>} The position in view space (xy billboarded, zw from the model-view transform).
*/
setupPositionView( builder ) {
const { object, camera } = builder;
const sizeAttenuation = this.sizeAttenuation;
const { positionNode, rotationNode, scaleNode } = this;
const mvPosition = modelViewMatrix.mul( vec3( positionNode || 0 ) );
// Extract the world scale from the model matrix columns.
let scale = vec2( modelWorldMatrix[ 0 ].xyz.length(), modelWorldMatrix[ 1 ].xyz.length() );
if ( scaleNode !== null ) {
scale = scale.mul( float( scaleNode ) );
}
if ( sizeAttenuation === false ) {
if ( camera.isPerspectiveCamera ) {
// Cancel the perspective division so the sprite keeps a constant screen size.
scale = scale.mul( mvPosition.z.negate() );
} else {
const orthoScale = float( 2.0 ).div( cameraProjectionMatrix.element( 1 ).element( 1 ) );
scale = scale.mul( orthoScale.mul( 2 ) );
}
}
let alignedPosition = positionGeometry.xy;
if ( object.center && object.center.isVector2 === true ) {
// Shift the quad so it rotates/scales around the configured center.
const center = reference$1( 'center', 'vec2', object );
alignedPosition = alignedPosition.sub( center.sub( 0.5 ) );
}
alignedPosition = alignedPosition.mul( scale );
const rotation = float( rotationNode || materialRotation );
const rotatedPosition = rotate( alignedPosition, rotation );
return vec4( mvPosition.xy.add( rotatedPosition ), mvPosition.zw );
}
copy( source ) {
// NOTE(review): `_useSizeAttenuation` is not transferred here — confirm
// whether `sizeAttenuation` is expected to survive a copy().
this.positionNode = source.positionNode;
this.rotationNode = source.rotationNode;
this.scaleNode = source.scaleNode;
return super.copy( source );
}
/**
* Whether to use size attenuation or not.
*
* @type {Boolean}
* @default true
*/
get sizeAttenuation() {
return this._useSizeAttenuation;
}
set sizeAttenuation( value ) {
if ( this._useSizeAttenuation !== value ) {
this._useSizeAttenuation = value;
// The generated shader depends on this flag, so force a rebuild.
this.needsUpdate = true;
}
}
}
const _defaultValues$1 = /*@__PURE__*/ new PointsMaterial();
/**
 * Node material version of `PointsMaterial`.
 *
 * @augments SpriteNodeMaterial
 */
class PointsNodeMaterial extends SpriteNodeMaterial {

	static get type() {

		return 'PointsNodeMaterial';

	}

	/**
	 * Constructs a new points node material.
	 *
	 * @param {Object?} parameters - The configuration parameter.
	 */
	constructor( parameters ) {

		super();

		/**
		 * This node property provides an additional way to set the point size.
		 *
		 * @type {Node<vec2>?}
		 * @default null
		 */
		this.sizeNode = null;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isPointsNodeMaterial = true;

		// Initialize the backing field of the `alphaToCoverage` accessor before
		// defaults/parameters are applied, mirroring `_useSizeAttenuation` in
		// SpriteNodeMaterial. Without this the getter reports `undefined` until
		// the setter has run at least once.
		this._useAlphaToCoverage = true;

		this.setDefaultValues( _defaultValues$1 );

		this.setValues( parameters );

	}

	/**
	 * Setups the position node in view space, honoring an optional
	 * `positionNode` override.
	 *
	 * @return {Node<vec3>} The position in view space.
	 */
	setupPositionView() {

		const { positionNode } = this;

		return modelViewMatrix.mul( vec3( positionNode || positionLocal ) ).xyz;

	}

	/**
	 * Implements the point specific vertex shader: expands each point into a
	 * screen-aligned quad in clip space, applying rotation, size and scale.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Node<vec4>} The position in clip space.
	 */
	setupVertex( builder ) {

		const mvp = super.setupVertex( builder );

		// skip further processing if the material is not a node material
		if ( builder.material.isNodeMaterial !== true ) {

			return mvp;

		}

		// ndc space
		const { rotationNode, scaleNode, sizeNode } = this;

		const alignedPosition = positionGeometry.xy.toVar();
		const aspect = viewport.z.div( viewport.w );

		// rotation
		if ( rotationNode && rotationNode.isNode ) {

			const rotation = float( rotationNode );

			alignedPosition.assign( rotate( alignedPosition, rotation ) );

		}

		// point size
		let pointSize = sizeNode !== null ? vec2( sizeNode ) : materialPointSize;

		if ( this.sizeAttenuation === true ) {

			pointSize = pointSize.mul( pointSize.div( positionView.z.negate() ) );

		}

		// scale
		if ( scaleNode && scaleNode.isNode ) {

			pointSize = pointSize.mul( vec2( scaleNode ) );

		}

		alignedPosition.mulAssign( pointSize.mul( 2 ) );

		// convert from pixels to ndc, compensating for the viewport aspect ratio
		alignedPosition.assign( alignedPosition.div( viewport.z ) );
		alignedPosition.y.assign( alignedPosition.y.mul( aspect ) );

		// back to clip space
		alignedPosition.assign( alignedPosition.mul( mvp.w ) );

		//clipPos.xy += offset;
		mvp.addAssign( vec4( alignedPosition, 0, 0 ) );

		return mvp;

	}

	/**
	 * Whether alpha to coverage should be used or not.
	 *
	 * @type {Boolean}
	 * @default true
	 */
	get alphaToCoverage() {

		return this._useAlphaToCoverage;

	}

	set alphaToCoverage( value ) {

		if ( this._useAlphaToCoverage !== value ) {

			this._useAlphaToCoverage = value;

			// The generated shader depends on this flag, so force a rebuild.
			this.needsUpdate = true;

		}

	}

}
/**
 * Represents lighting model for a shadow material. Used in {@link ShadowNodeMaterial}.
 *
 * @augments LightingModel
 */
class ShadowMaskModel extends LightingModel {

	/**
	 * Constructs a new shadow mask model.
	 */
	constructor() {

		super();

		/**
		 * The accumulated shadow mask over all lights.
		 *
		 * @type {Node}
		 */
		this.shadowNode = float( 1 ).toVar( 'shadowMask' );

	}

	/**
	 * Only used to save the shadow mask of the current light.
	 *
	 * @param {Object} input - The input data.
	 */
	direct( { shadowMask } ) {

		this.shadowNode.mulAssign( shadowMask );

	}

	/**
	 * Uses the shadow mask to produce the final color.
	 *
	 * @param {ContextNode} context - The current node context.
	 */
	finish( context ) {

		// Fully shadowed fragments become opaque; fully lit ones transparent.
		diffuseColor.a.mulAssign( this.shadowNode.oneMinus() );

		context.outgoingLight.rgb.assign( diffuseColor.rgb ); // TODO: Optimize LightsNode to avoid this assignment

	}

}
const _defaultValues = /*@__PURE__*/ new ShadowMaterial();
/**
 * Node material version of `ShadowMaterial`.
 *
 * @augments NodeMaterial
 */
class ShadowNodeMaterial extends NodeMaterial {

	static get type() {

		return 'ShadowNodeMaterial';

	}

	/**
	 * Constructs a new shadow node material.
	 *
	 * @param {Object?} parameters - The configuration parameter.
	 */
	constructor( parameters ) {

		super();

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isShadowNodeMaterial = true;

		/**
		 * Set to `true` so it's possible to implement
		 * the shadow mask effect.
		 *
		 * @type {Boolean}
		 * @default true
		 */
		this.lights = true;

		this.setDefaultValues( _defaultValues );

		this.setValues( parameters );

	}

	/**
	 * Setups the lighting model.
	 *
	 * @return {ShadowMaskModel} The lighting model.
	 */
	setupLightingModel( /*builder*/ ) {

		return new ShadowMaskModel();

	}

}
/** @module Texture3DNode **/
// Derives a normal for a position inside a 3D texture: on the bounding faces
// of the unit cube the face normal is returned directly, otherwise the
// gradient of the sampled `r` channel is approximated with central differences.
const normal = Fn( ( { texture, uv } ) => {

	const epsilon = 0.0001;

	const result = vec3().toVar();

	If( uv.x.lessThan( epsilon ), () => {

		result.assign( vec3( 1, 0, 0 ) );

	} ).ElseIf( uv.y.lessThan( epsilon ), () => {

		result.assign( vec3( 0, 1, 0 ) );

	} ).ElseIf( uv.z.lessThan( epsilon ), () => {

		result.assign( vec3( 0, 0, 1 ) );

	} ).ElseIf( uv.x.greaterThan( 1 - epsilon ), () => {

		result.assign( vec3( - 1, 0, 0 ) );

	} ).ElseIf( uv.y.greaterThan( 1 - epsilon ), () => {

		result.assign( vec3( 0, - 1, 0 ) );

	} ).ElseIf( uv.z.greaterThan( 1 - epsilon ), () => {

		result.assign( vec3( 0, 0, - 1 ) );

	} ).Else( () => {

		const step = 0.01;

		const dx = texture.sample( uv.add( vec3( - step, 0.0, 0.0 ) ) ).r.sub( texture.sample( uv.add( vec3( step, 0.0, 0.0 ) ) ).r );
		const dy = texture.sample( uv.add( vec3( 0.0, - step, 0.0 ) ) ).r.sub( texture.sample( uv.add( vec3( 0.0, step, 0.0 ) ) ).r );
		const dz = texture.sample( uv.add( vec3( 0.0, 0.0, - step ) ) ).r.sub( texture.sample( uv.add( vec3( 0.0, 0.0, step ) ) ).r );

		result.assign( vec3( dx, dy, dz ) );

	} );

	return result.normalize();

} );
/**
* This type of uniform node represents a 3D texture.
*
* @augments module:TextureNode~TextureNode
*/
class Texture3DNode extends TextureNode {
static get type() {
return 'Texture3DNode';
}
/**
* Constructs a new 3D texture node.
*
* @param {Data3DTexture} value - The 3D texture.
* @param {Node<vec2|vec3>?} [uvNode=null] - The uv node.
* @param {Node<int>?} [levelNode=null] - The level node.
*/
constructor( value, uvNode = null, levelNode = null ) {
super( value, uvNode, levelNode );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isTexture3DNode = true;
}
/**
* Overwrites the default implementation to return a fixed value `'texture3D'`.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {String} The input type.
*/
getInputType( /*builder*/ ) {
return 'texture3D';
}
/**
* Returns a default uv node which is in context of 3D textures a three-dimensional
* uv node pointing at the center of the texture.
*
* @return {Node<vec3>} The default uv node.
*/
getDefaultUV() {
return vec3( 0.5, 0.5, 0.5 );
}
/**
* Overwritten with an empty implementation since the `updateMatrix` flag is ignored
* for 3D textures. The uv transformation matrix is not applied to 3D textures.
*
* @param {Boolean} value - The update toggle.
*/
setUpdateMatrix( /*value*/ ) { } // Ignore .updateMatrix for 3d TextureNode
/**
* Overwrites the default implementation to only apply a flip-Y correction for
* render-target/framebuffer textures; otherwise the uv node is returned unmodified.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {Node} uvNode - The uv node to setup.
* @return {Node} The (possibly flipped) uv node.
*/
setupUV( builder, uvNode ) {
const texture = this.value;
if ( builder.isFlipY() && ( texture.isRenderTargetTexture === true || texture.isFramebufferTexture === true ) ) {
if ( this.sampler ) {
uvNode = uvNode.flipY();
} else {
// Unsampled access uses integer texel coordinates, so mirror Y against the texture height.
uvNode = uvNode.setY( int( textureSize( this, this.levelNode ).y ).sub( uvNode.y ).sub( 1 ) );
}
}
return uvNode;
}
/**
* Generates the uv code snippet.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {Node} uvNode - The uv node to generate code for.
* @return {String} The generated code snippet.
*/
generateUV( builder, uvNode ) {
return uvNode.build( builder, 'vec3' );
}
/**
* Computes a normal for the given uv coordinates by sampling this 3D texture,
* using the module-level `normal` TSL function (face normals on the cube
* boundary, central-difference gradient in the interior).
*
* @param {Node<vec3>} uvNode - The uv node.
* @return {Node<vec3>} The normalized normal vector.
*/
normal( uvNode ) {
return normal( { texture: this, uv: uvNode } );
}
}
/**
* TSL function for creating a 3D texture node.
*
* @function
* @param {Data3DTexture} value - The 3D texture.
* @param {Node<vec2|vec3>?} [uvNode=null] - The uv node.
* @param {Node<int>?} [levelNode=null] - The level node (mip level).
* @returns {Texture3DNode}
*/
const texture3D = /*@__PURE__*/ nodeProxy( Texture3DNode );
/** @module VolumeNodeMaterial **/
/**
* Node material intended for volume rendering. The volumetric data are
* defined with an instance of {@link Data3DTexture}.
*
* @augments NodeMaterial
*/
class VolumeNodeMaterial extends NodeMaterial {
static get type() {
return 'VolumeNodeMaterial';
}
/**
* Constructs a new volume node material.
*
* @param {Object?} parameters - The configuration parameter.
*/
constructor( parameters ) {
super();
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isVolumeNodeMaterial = true;
/**
* The base color of the volume.
*
* @type {Color}
* @default (0xffffff)
*/
this.base = new Color( 0xffffff );
/**
* A 3D data texture holding the volumetric data.
*
* @type {Data3DTexture?}
* @default null
*/
this.map = null;
/**
* The number of samples for each ray that hits the mesh's surface
* and travels through the volume.
*
* @type {Number}
* @default 100
*/
this.steps = 100;
/**
* Callback for {@link VolumeNodeMaterial#testNode}.
*
* @callback testNodeCallback
* @param {Data3DTexture<float>} map - The 3D texture.
* @param {Node<float>} mapValue - The sampled value inside the volume.
* @param {Node<vec3>} probe - The probe which is the entry point of the ray on the mesh's surface.
* @param {Node<vec4>} finalColor - The final color.
*/
/**
* The volume rendering of this material works by shooting rays
* from the camera position through each fragment of the mesh's
* surface and sample the inner volume in a raymarching fashion
* multiple times.
*
* This node can be used to assign a callback function of type `Fn`
* that will be executed per sample. The callback receives the
* texture, the sampled texture value as well as position on the surface
* where the rays enters the volume. The last parameter is a color
* that allows the callback to determine the final color.
*
* @type {testNodeCallback?}
* @default null
*/
this.testNode = null;
this.setValues( parameters );
}
/**
* Setups the vertex and fragment stage of this node material.
*
* @param {NodeBuilder} builder - The current node builder.
*/
setup( builder ) {
const map = texture3D( this.map, null, 0 );
// Ray/box intersection against the unit box centered at the origin
// (slab method). Returns vec2( tNear, tFar ); the ray misses the box
// when tNear > tFar.
const hitBox = Fn( ( { orig, dir } ) => {
const box_min = vec3( - 0.5 );
const box_max = vec3( 0.5 );
const inv_dir = dir.reciprocal();
const tmin_tmp = box_min.sub( orig ).mul( inv_dir );
const tmax_tmp = box_max.sub( orig ).mul( inv_dir );
const tmin = min$1( tmin_tmp, tmax_tmp );
const tmax = max$1( tmin_tmp, tmax_tmp );
const t0 = max$1( tmin.x, max$1( tmin.y, tmin.z ) );
const t1 = min$1( tmax.x, min$1( tmax.y, tmax.z ) );
return vec2( t0, t1 );
} );
this.fragmentNode = Fn( () => {
// camera position and view direction expressed in the mesh's local space
const vOrigin = varying( vec3( modelWorldMatrixInverse.mul( vec4( cameraPosition, 1.0 ) ) ) );
const vDirection = varying( positionGeometry.sub( vOrigin ) );
const rayDir = vDirection.normalize();
const bounds = vec2( hitBox( { orig: vOrigin, dir: rayDir } ) ).toVar();
// discard fragments whose ray misses the volume's bounding box
bounds.x.greaterThan( bounds.y ).discard();
// clamp the entry distance so marching starts at the camera when it sits inside the volume
bounds.assign( vec2( max$1( bounds.x, 0.0 ), bounds.y ) );
const p = vec3( vOrigin.add( bounds.x.mul( rayDir ) ) ).toVar();
const inc = vec3( rayDir.abs().reciprocal() ).toVar();
// step size: smallest axis-aligned traversal distance divided by the configured step count
const delta = float( min$1( inc.x, min$1( inc.y, inc.z ) ) ).toVar( 'delta' ); // used 'delta' name in loop
delta.divAssign( materialReference( 'steps', 'float' ) );
const ac = vec4( materialReference( 'base', 'color' ), 0.0 ).toVar();
Loop( { type: 'float', start: bounds.x, end: bounds.y, update: '+= delta' }, () => {
// the box spans [-0.5, 0.5], so adding 0.5 maps the position into [0, 1] texture space
const d = property( 'float', 'd' ).assign( map.sample( p.add( 0.5 ) ).r );
if ( this.testNode !== null ) {
this.testNode( { map: map, mapValue: d, probe: p, finalColor: ac } ).append();
} else {
// default to show surface of mesh
ac.a.assign( 1 );
Break();
}
p.addAssign( rayDir.mul( delta ) );
} );
// fragments whose rays never accumulated any opacity are discarded
ac.a.equal( 0 ).discard();
return vec4( ac );
} )();
super.setup( builder );
}
}
/**
* This module manages the internal animation loop of the renderer.
*
* @private
*/
class Animation {

	/**
	 * Constructs a new animation loop management component.
	 *
	 * @param {Nodes} nodes - Renderer component for managing nodes related logic.
	 * @param {Info} info - Renderer component for managing metrics and monitoring data.
	 */
	constructor( nodes, info ) {

		/**
		 * Renderer component for managing nodes related logic.
		 *
		 * @type {Nodes}
		 */
		this.nodes = nodes;

		/**
		 * Renderer component for managing metrics and monitoring data.
		 *
		 * @type {Info}
		 */
		this.info = info;

		/**
		 * The context on which `requestAnimationFrame()` is
		 * called (usually `window`).
		 *
		 * @type {Window|XRSession}
		 */
		this._context = self;

		/**
		 * The user-defined animation loop.
		 *
		 * @type {Function?}
		 * @default null
		 */
		this._animationLoop = null;

		/**
		 * The request id returned by `requestAnimationFrame()`.
		 * Used for cancelling the animation loop.
		 *
		 * @type {Number?}
		 * @default null
		 */
		this._requestId = null;

	}

	/**
	 * Starts the internal animation loop. The first tick runs synchronously
	 * and without timestamp/frame arguments; all subsequent ticks are
	 * scheduled via `requestAnimationFrame()`.
	 */
	start() {

		const onFrame = ( time, xrFrame ) => {

			// schedule the next tick up front so the loop keeps running
			// even if one of the callbacks below throws
			this._requestId = this._context.requestAnimationFrame( onFrame );

			if ( this.info.autoReset === true ) this.info.reset();

			this.nodes.nodeFrame.update();

			this.info.frame = this.nodes.nodeFrame.frameId;

			if ( this._animationLoop !== null ) this._animationLoop( time, xrFrame );

		};

		onFrame();

	}

	/**
	 * Stops the internal animation loop by cancelling the
	 * pending animation frame request.
	 */
	stop() {

		this._context.cancelAnimationFrame( this._requestId );

		this._requestId = null;

	}

	/**
	 * Returns the user-level animation loop.
	 *
	 * @return {Function} The animation loop.
	 */
	getAnimationLoop() {

		return this._animationLoop;

	}

	/**
	 * Defines the user-level animation loop.
	 *
	 * @param {Function} callback - The animation loop.
	 */
	setAnimationLoop( callback ) {

		this._animationLoop = callback;

	}

	/**
	 * Returns the animation context.
	 *
	 * @return {Window|XRSession} The animation context.
	 */
	getContext() {

		return this._context;

	}

	/**
	 * Defines the context in which `requestAnimationFrame()` is executed.
	 *
	 * @param {Window|XRSession} context - The context to set.
	 */
	setContext( context ) {

		this._context = context;

	}

	/**
	 * Frees all internal resources and stops the animation loop.
	 */
	dispose() {

		this.stop();

	}

}
/**
* Data structure for the renderer. It allows defining values
* with chained, hierarchical keys. Keys are meant to be
* objects since the module internally works with Weak Maps
* for performance reasons.
*
* @private
*/
class ChainMap {

	/**
	 * Constructs a new Chain Map.
	 */
	constructor() {

		/**
		 * The root Weak Map.
		 *
		 * @type {WeakMap}
		 */
		this.weakMap = new WeakMap();

	}

	/**
	 * Returns the value for the given array of keys.
	 *
	 * @param {Array<Object>} keys - List of keys.
	 * @return {Any} The value. Returns `undefined` if no value was found.
	 */
	get( keys ) {

		const last = keys.length - 1;
		let level = this.weakMap;

		// descend through the nested weak maps along all keys but the last
		for ( let i = 0; i < last; i ++ ) {

			level = level.get( keys[ i ] );

			if ( level === undefined ) return undefined;

		}

		return level.get( keys[ last ] );

	}

	/**
	 * Sets the value for the given keys.
	 *
	 * @param {Array<Object>} keys - List of keys.
	 * @param {Any} value - The value to set.
	 * @return {ChainMap} A reference to this Chain Map.
	 */
	set( keys, value ) {

		const last = keys.length - 1;
		let level = this.weakMap;

		// create missing intermediate weak maps on the way down
		for ( let i = 0; i < last; i ++ ) {

			const key = keys[ i ];

			let next = level.get( key );

			if ( next === undefined ) {

				next = new WeakMap();
				level.set( key, next );

			}

			level = next;

		}

		level.set( keys[ last ], value );

		return this;

	}

	/**
	 * Deletes a value for the given keys.
	 *
	 * @param {Array<Object>} keys - The keys.
	 * @return {Boolean} Returns `true` if the value has been removed successfully and `false` if the value has not be found.
	 */
	delete( keys ) {

		const last = keys.length - 1;
		let level = this.weakMap;

		for ( let i = 0; i < last; i ++ ) {

			level = level.get( keys[ i ] );

			if ( level === undefined ) return false;

		}

		return level.delete( keys[ last ] );

	}

}
let _id$9 = 0;

/**
 * Returns the own enumerable property names of the given object plus the
 * names of all getter properties found anywhere on its prototype chain
 * (including `Object.prototype`). Data properties on the prototypes —
 * e.g. methods — are skipped.
 *
 * @param {Object} obj - The object to inspect.
 * @return {Array<String>} The collected property names.
 */
function getKeys( obj ) {

	const keys = Object.keys( obj );

	let proto = Object.getPrototypeOf( obj );

	while ( proto !== null ) {

		const descriptors = Object.getOwnPropertyDescriptors( proto );

		for ( const name of Object.keys( descriptors ) ) {

			// only accessor properties are collected from the prototypes
			if ( typeof descriptors[ name ].get === 'function' ) {

				keys.push( name );

			}

		}

		proto = Object.getPrototypeOf( proto );

	}

	return keys;

}
/**
* A render object is the renderer's representation of single entity that gets drawn
* with a draw command. There is no unique mapping of render objects to 3D objects in the
* scene since render objects also depend from the used material, the current render context
* and the current scene's lighting.
*
* In general, the basic process of the renderer is:
*
* - Analyze the 3D objects in the scene and generate render lists containing render items.
* - Process the render lists by calling one or more render commands for each render item.
* - For each render command, request a render object and perform the draw.
*
* The module provides an interface to get data required for the draw command like the actual
* draw parameters or vertex buffers. It also holds a series of caching related methods since
* creating render objects should only be done when necessary.
*
* @private
*/
class RenderObject {
/**
* Constructs a new render object.
*
* @param {Nodes} nodes - Renderer component for managing nodes related logic.
* @param {Geometries} geometries - Renderer component for managing geometries.
* @param {Renderer} renderer - The renderer.
* @param {Object3D} object - The 3D object.
* @param {Material} material - The 3D object's material.
* @param {Scene} scene - The scene the 3D object belongs to.
* @param {Camera} camera - The camera the object should be rendered with.
* @param {LightsNode} lightsNode - The lights node.
* @param {RenderContext} renderContext - The render context.
* @param {ClippingContext} clippingContext - The clipping context.
*/
constructor( nodes, geometries, renderer, object, material, scene, camera, lightsNode, renderContext, clippingContext ) {
// unique, monotonically increasing id (module-scoped counter)
this.id = _id$9 ++;
/**
* Renderer component for managing nodes related logic.
*
* @type {Nodes}
* @private
*/
this._nodes = nodes;
/**
* Renderer component for managing geometries.
*
* @type {Geometries}
* @private
*/
this._geometries = geometries;
/**
* The renderer.
*
* @type {Renderer}
*/
this.renderer = renderer;
/**
* The 3D object.
*
* @type {Object3D}
*/
this.object = object;
/**
* The 3D object's material.
*
* @type {Material}
*/
this.material = material;
/**
* The scene the 3D object belongs to.
*
* @type {Scene}
*/
this.scene = scene;
/**
* The camera the 3D object should be rendered with.
*
* @type {Camera}
*/
this.camera = camera;
/**
* The lights node.
*
* @type {LightsNode}
*/
this.lightsNode = lightsNode;
/**
* The render context.
*
* @type {RenderContext}
*/
this.context = renderContext;
/**
* The 3D object's geometry.
*
* @type {BufferGeometry}
*/
this.geometry = object.geometry;
/**
* The render object's version.
*
* @type {Number}
*/
this.version = material.version;
/**
* The draw range of the geometry.
*
* @type {Object?}
* @default null
*/
this.drawRange = null;
/**
* An array holding the buffer attributes
* of the render object. This entails attribute
* definitions on geometry and node level.
*
* @type {Array<BufferAttribute>?}
* @default null
*/
this.attributes = null;
/**
* A reference to a render pipeline the render
* object is processed with.
*
* @type {RenderPipeline}
* @default null
*/
this.pipeline = null;
/**
* Only relevant for objects using
* multiple materials. This represents a group entry
* from the respective `BufferGeometry`.
*
* @type {{start: Number, count: Number}?}
* @default null
*/
this.group = null;
/**
* An array holding the vertex buffers which can
* be buffer attributes but also interleaved buffers.
*
* @type {Array<BufferAttribute|InterleavedBuffer>?}
* @default null
*/
this.vertexBuffers = null;
/**
* The parameters for the draw command.
*
* @type {Object?}
* @default null
*/
this.drawParams = null;
/**
* If this render object is used inside a render bundle,
* this property points to the respective bundle group.
*
* @type {BundleGroup?}
* @default null
*/
this.bundle = null;
/**
* The clipping context.
*
* @type {ClippingContext}
*/
this.clippingContext = clippingContext;
/**
* The clipping context's cache key.
*
* @type {String}
*/
this.clippingContextCacheKey = clippingContext !== null ? clippingContext.cacheKey : '';
/**
* The initial node cache key.
*
* @type {Number}
*/
this.initialNodesCacheKey = this.getDynamicCacheKey();
/**
* The initial cache key.
*
* @type {Number}
*/
this.initialCacheKey = this.getCacheKey();
/**
* The node builder state.
*
* @type {NodeBuilderState?}
* @private
* @default null
*/
this._nodeBuilderState = null;
/**
* An array of bindings.
*
* @type {Array<BindGroup>?}
* @private
* @default null
*/
this._bindings = null;
/**
* Reference to the node material observer.
*
* @type {NodeMaterialObserver?}
* @private
* @default null
*/
this._monitor = null;
/**
* An event listener which is defined by `RenderObjects`. It performs
* clean up tasks when `dispose()` on this render object.
*
* @method
*/
this.onDispose = null;
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isRenderObject = true;
/**
* An event listener which is executed when `dispose()` is called on
* the render object's material.
*
* @method
*/
this.onMaterialDispose = () => {
this.dispose();
};
this.material.addEventListener( 'dispose', this.onMaterialDispose );
}
/**
* Updates the clipping context.
*
* @param {ClippingContext} context - The clipping context to set.
*/
updateClipping( context ) {
this.clippingContext = context;
}
/**
* Whether the clipping requires an update or not.
*
* Note: Accessing this getter has a side effect — when an update is
* required, the cached cache key is synced so subsequent accesses
* return `false`.
*
* @type {Boolean}
* @readonly
*/
get clippingNeedsUpdate() {
if ( this.clippingContext === null || this.clippingContext.cacheKey === this.clippingContextCacheKey ) return false;
this.clippingContextCacheKey = this.clippingContext.cacheKey;
return true;
}
/**
* The number of clipping planes defined in context of hardware clipping.
*
* @type {Number}
* @readonly
*/
get hardwareClippingPlanes() {
return this.material.hardwareClipping === true ? this.clippingContext.unionClippingCount : 0;
}
/**
* Returns the node builder state of this render object.
*
* @return {NodeBuilderState} The node builder state.
*/
getNodeBuilderState() {
return this._nodeBuilderState || ( this._nodeBuilderState = this._nodes.getForRender( this ) );
}
/**
* Returns the node material observer of this render object.
*
* @return {NodeMaterialObserver} The node material observer.
*/
getMonitor() {
return this._monitor || ( this._monitor = this.getNodeBuilderState().observer );
}
/**
* Returns an array of bind groups of this render object.
*
* @return {Array<BindGroup>} The bindings.
*/
getBindings() {
return this._bindings || ( this._bindings = this.getNodeBuilderState().createBindings() );
}
/**
* Returns a binding group by group name of this render object.
*
* @param {String} name - The name of the binding group.
* @return {BindGroup?} The bindings. `undefined` if no group with the given name exists.
*/
getBindingGroup( name ) {
for ( const bindingGroup of this.getBindings() ) {
if ( bindingGroup.name === name ) {
return bindingGroup;
}
}
}
/**
* Returns the index of the render object's geometry.
*
* @return {BufferAttribute?} The index. Returns `null` for non-indexed geometries.
*/
getIndex() {
return this._geometries.getIndex( this );
}
/**
* Returns the indirect buffer attribute.
*
* @return {BufferAttribute?} The indirect attribute. `null` if no indirect drawing is used.
*/
getIndirect() {
return this._geometries.getIndirect( this );
}
/**
* Returns an array that acts as a key for identifying the render object in a chain map.
*
* @return {Array<Object>} An array with object references.
*/
getChainArray() {
return [ this.object, this.material, this.context, this.lightsNode ];
}
/**
* This method is used when the geometry of a 3D object has been exchanged and the
* respective render object now requires an update.
*
* @param {BufferGeometry} geometry - The geometry to set.
*/
setGeometry( geometry ) {
this.geometry = geometry;
// invalidate cached attributes so they are re-collected on next access
this.attributes = null;
}
/**
* Returns the buffer attributes of the render object. The returned array holds
* attribute definitions on geometry and node level.
*
* @return {Array<BufferAttribute>} An array with buffer attributes.
*/
getAttributes() {
if ( this.attributes !== null ) return this.attributes;
const nodeAttributes = this.getNodeBuilderState().nodeAttributes;
const geometry = this.geometry;
const attributes = [];
const vertexBuffers = new Set();
for ( const nodeAttribute of nodeAttributes ) {
// node-level attributes take precedence over geometry attributes of the same name
const attribute = nodeAttribute.node && nodeAttribute.node.attribute ? nodeAttribute.node.attribute : geometry.getAttribute( nodeAttribute.name );
if ( attribute === undefined ) continue;
attributes.push( attribute );
const bufferAttribute = attribute.isInterleavedBufferAttribute ? attribute.data : attribute;
vertexBuffers.add( bufferAttribute );
}
this.attributes = attributes;
this.vertexBuffers = Array.from( vertexBuffers.values() );
return attributes;
}
/**
* Returns the vertex buffers of the render object.
*
* @return {Array<BufferAttribute|InterleavedBuffer>} An array with buffer attribute or interleaved buffers.
*/
getVertexBuffers() {
if ( this.vertexBuffers === null ) this.getAttributes();
return this.vertexBuffers;
}
/**
* Returns the draw parameters for the render object.
*
* @return {{vertexCount: Number, firstVertex: Number, instanceCount: Number, firstInstance: Number}} The draw parameters.
*/
getDrawParameters() {
const { object, material, geometry, group, drawRange } = this;
const drawParams = this.drawParams || ( this.drawParams = {
vertexCount: 0,
firstVertex: 0,
instanceCount: 0,
firstInstance: 0
} );
const index = this.getIndex();
const hasIndex = ( index !== null );
const instanceCount = geometry.isInstancedBufferGeometry ? geometry.instanceCount : ( object.count > 1 ? object.count : 1 );
// nothing to draw
if ( instanceCount === 0 ) return null;
drawParams.instanceCount = instanceCount;
if ( object.isBatchedMesh === true ) return drawParams;
// wireframe rendering emits two indices per original index
// (see getWireframeIndex()), so ranges must be scaled accordingly
let rangeFactor = 1;
if ( material.wireframe === true && ! object.isPoints && ! object.isLineSegments && ! object.isLine && ! object.isLineLoop ) {
rangeFactor = 2;
}
let firstVertex = drawRange.start * rangeFactor;
let lastVertex = ( drawRange.start + drawRange.count ) * rangeFactor;
// intersect the draw range with the geometry group (multi-material)
if ( group !== null ) {
firstVertex = Math.max( firstVertex, group.start * rangeFactor );
lastVertex = Math.min( lastVertex, ( group.start + group.count ) * rangeFactor );
}
const position = geometry.attributes.position;
let itemCount = Infinity;
if ( hasIndex ) {
itemCount = index.count;
} else if ( position !== undefined && position !== null ) {
itemCount = position.count;
}
firstVertex = Math.max( firstVertex, 0 );
lastVertex = Math.min( lastVertex, itemCount );
const count = lastVertex - firstVertex;
// count is Infinity when a non-indexed geometry has no position attribute
if ( count < 0 || count === Infinity ) return null;
drawParams.vertexCount = count;
drawParams.firstVertex = firstVertex;
return drawParams;
}
/**
* Returns the render object's geometry cache key.
*
* The geometry cache key is part of the material cache key.
*
* @return {String} The geometry cache key.
*/
getGeometryCacheKey() {
const { geometry } = this;
let cacheKey = '';
for ( const name of Object.keys( geometry.attributes ).sort() ) {
const attribute = geometry.attributes[ name ];
cacheKey += name + ',';
// NOTE(review): falsy values (e.g. an offset of 0) are skipped by
// these truthiness checks — presumably fine since 0 is the default
if ( attribute.data ) cacheKey += attribute.data.stride + ',';
if ( attribute.offset ) cacheKey += attribute.offset + ',';
if ( attribute.itemSize ) cacheKey += attribute.itemSize + ',';
if ( attribute.normalized ) cacheKey += 'n,';
}
// structural equality isn't sufficient for morph targets since the
// data are maintained in textures. only if the targets are all equal
// the texture and thus the instance of `MorphNode` can be shared.
for ( const name of Object.keys( geometry.morphAttributes ).sort() ) {
const targets = geometry.morphAttributes[ name ];
cacheKey += 'morph-' + name + ',';
for ( let i = 0, l = targets.length; i < l; i ++ ) {
const attribute = targets[ i ];
cacheKey += attribute.id + ',';
}
}
if ( geometry.index ) {
cacheKey += 'index,';
}
return cacheKey;
}
/**
* Returns the render object's material cache key.
*
* The material cache key is part of the render object cache key.
*
* @return {Number} The material cache key.
*/
getMaterialCacheKey() {
const { object, material } = this;
let cacheKey = material.customProgramCacheKey();
for ( const property of getKeys( material ) ) {
// skip identity/bookkeeping properties (type flags, privates, visible,
// version, uuid, name, opacity, userData) which don't influence the program
if ( /^(is[A-Z]|_)|^(visible|version|uuid|name|opacity|userData)$/.test( property ) ) continue;
const value = material[ property ];
let valueKey;
if ( value !== null ) {
// some material values require a formatting
const type = typeof value;
if ( type === 'number' ) {
valueKey = value !== 0 ? '1' : '0'; // Convert to on/off, important for clearcoat, transmission, etc
} else if ( type === 'object' ) {
valueKey = '{';
if ( value.isTexture ) {
valueKey += value.mapping;
}
valueKey += '}';
} else {
valueKey = String( value );
}
} else {
valueKey = String( value );
}
cacheKey += /*property + ':' +*/ valueKey + ',';
}
cacheKey += this.clippingContextCacheKey + ',';
if ( object.geometry ) {
cacheKey += this.getGeometryCacheKey();
}
if ( object.skeleton ) {
cacheKey += object.skeleton.bones.length + ',';
}
if ( object.isBatchedMesh ) {
cacheKey += object._matricesTexture.uuid + ',';
if ( object._colorsTexture !== null ) {
cacheKey += object._colorsTexture.uuid + ',';
}
}
if ( object.count > 1 ) {
// TODO: https://github.com/mrdoob/three.js/pull/29066#issuecomment-2269400850
cacheKey += object.uuid + ',';
}
cacheKey += object.receiveShadow + ',';
return hashString( cacheKey );
}
/**
* Whether the geometry requires an update or not.
*
* @type {Boolean}
* @readonly
*/
get needsGeometryUpdate() {
return this.geometry.id !== this.object.geometry.id;
}
/**
* Whether the render object requires an update or not.
*
* Note: There are two distinct places where render objects are checked for an update.
*
* 1. In `RenderObjects.get()` which is executed when the render object is request. This
* method checks the `needsUpdate` flag and recreates the render object if necessary.
* 2. In `Renderer._renderObjectDirect()` right after getting the render object via
* `RenderObjects.get()`. The render object's NodeMaterialObserver is then used to detect
* a need for a refresh due to material, geometry or object related value changes.
*
* TODO: Investigate if it's possible to merge both steps so there is only a single place
* that performs the 'needsUpdate' check.
*
* @type {Boolean}
* @readonly
*/
get needsUpdate() {
return /*this.object.static !== true &&*/ ( this.initialNodesCacheKey !== this.getDynamicCacheKey() || this.clippingNeedsUpdate );
}
/**
* Returns the dynamic cache key which represents a key that is computed per draw command.
*
* @return {Number} The cache key.
*/
getDynamicCacheKey() {
let cacheKey = 0;
// `Nodes.getCacheKey()` returns an environment cache key which is not relevant when
// the renderer is inside a shadow pass.
if ( this.material.isShadowPassMaterial !== true ) {
cacheKey = this._nodes.getCacheKey( this.scene, this.lightsNode );
}
if ( this.camera.isArrayCamera ) {
cacheKey = hash$1( cacheKey, this.camera.cameras.length );
}
if ( this.object.receiveShadow ) {
cacheKey = hash$1( cacheKey, 1 );
}
return cacheKey;
}
/**
* Returns the render object's cache key.
*
* @return {Number} The cache key.
*/
getCacheKey() {
return this.getMaterialCacheKey() + this.getDynamicCacheKey();
}
/**
* Frees internal resources.
*/
dispose() {
this.material.removeEventListener( 'dispose', this.onMaterialDispose );
this.onDispose();
}
}
// module-scoped scratch array, reused by RenderObjects#get() to avoid per-call allocations
const _chainKeys$5 = [];
/**
* This module manages the render objects of the renderer.
*
* @private
*/
class RenderObjects {
/**
* Constructs a new render object management component.
*
* @param {Renderer} renderer - The renderer.
* @param {Nodes} nodes - Renderer component for managing nodes related logic.
* @param {Geometries} geometries - Renderer component for managing geometries.
* @param {Pipelines} pipelines - Renderer component for managing pipelines.
* @param {Bindings} bindings - Renderer component for managing bindings.
* @param {Info} info - Renderer component for managing metrics and monitoring data.
*/
constructor( renderer, nodes, geometries, pipelines, bindings, info ) {
/**
* The renderer.
*
* @type {Renderer}
*/
this.renderer = renderer;
/**
* Renderer component for managing nodes related logic.
*
* @type {Nodes}
*/
this.nodes = nodes;
/**
* Renderer component for managing geometries.
*
* @type {Geometries}
*/
this.geometries = geometries;
/**
* Renderer component for managing pipelines.
*
* @type {Pipelines}
*/
this.pipelines = pipelines;
/**
* Renderer component for managing bindings.
*
* @type {Bindings}
*/
this.bindings = bindings;
/**
* Renderer component for managing metrics and monitoring data.
*
* @type {Info}
*/
this.info = info;
/**
* A dictionary that manages render contexts in chain maps
* for each pass ID.
*
* @type {Object<String,ChainMap>}
*/
this.chainMaps = {};
}
/**
* Returns a render object for the given object and state data. Cached
* render objects are reused; they are recreated when their cache key
* has changed since creation.
*
* @param {Object3D} object - The 3D object.
* @param {Material} material - The 3D object's material.
* @param {Scene} scene - The scene the 3D object belongs to.
* @param {Camera} camera - The camera the 3D object should be rendered with.
* @param {LightsNode} lightsNode - The lights node.
* @param {RenderContext} renderContext - The render context.
* @param {ClippingContext} clippingContext - The clipping context.
* @param {String?} passId - An optional ID for identifying the pass.
* @return {RenderObject} The render object.
*/
get( object, material, scene, camera, lightsNode, renderContext, clippingContext, passId ) {
const chainMap = this.getChainMap( passId );
// reuse chainArray
_chainKeys$5[ 0 ] = object;
_chainKeys$5[ 1 ] = material;
_chainKeys$5[ 2 ] = renderContext;
_chainKeys$5[ 3 ] = lightsNode;
let renderObject = chainMap.get( _chainKeys$5 );
if ( renderObject === undefined ) {
renderObject = this.createRenderObject( this.nodes, this.geometries, this.renderer, object, material, scene, camera, lightsNode, renderContext, clippingContext, passId );
chainMap.set( _chainKeys$5, renderObject );
} else {
renderObject.updateClipping( clippingContext );
if ( renderObject.needsGeometryUpdate ) {
renderObject.setGeometry( object.geometry );
}
if ( renderObject.version !== material.version || renderObject.needsUpdate ) {
if ( renderObject.initialCacheKey !== renderObject.getCacheKey() ) {
// cache key changed: dispose the outdated render object and
// create a fresh one via a recursive call
renderObject.dispose();
renderObject = this.get( object, material, scene, camera, lightsNode, renderContext, clippingContext, passId );
} else {
renderObject.version = material.version;
}
}
}
// clear the scratch array so it does not retain object references
_chainKeys$5.length = 0;
return renderObject;
}
/**
* Returns a chain map for the given pass ID.
*
* @param {String} [passId='default'] - The pass ID.
* @return {ChainMap} The chain map.
*/
getChainMap( passId = 'default' ) {
return this.chainMaps[ passId ] || ( this.chainMaps[ passId ] = new ChainMap() );
}
/**
* Frees internal resources.
*/
dispose() {
this.chainMaps = {};
}
/**
* Factory method for creating render objects with the given list of parameters.
* The created render object unregisters itself from the pipelines, bindings,
* nodes and chain map components when it is disposed.
*
* @param {Nodes} nodes - Renderer component for managing nodes related logic.
* @param {Geometries} geometries - Renderer component for managing geometries.
* @param {Renderer} renderer - The renderer.
* @param {Object3D} object - The 3D object.
* @param {Material} material - The object's material.
* @param {Scene} scene - The scene the 3D object belongs to.
* @param {Camera} camera - The camera the object should be rendered with.
* @param {LightsNode} lightsNode - The lights node.
* @param {RenderContext} renderContext - The render context.
* @param {ClippingContext} clippingContext - The clipping context.
* @param {String?} passId - An optional ID for identifying the pass.
* @return {RenderObject} The render object.
*/
createRenderObject( nodes, geometries, renderer, object, material, scene, camera, lightsNode, renderContext, clippingContext, passId ) {
const chainMap = this.getChainMap( passId );
const renderObject = new RenderObject( nodes, geometries, renderer, object, material, scene, camera, lightsNode, renderContext, clippingContext );
renderObject.onDispose = () => {
this.pipelines.delete( renderObject );
this.bindings.delete( renderObject );
this.nodes.delete( renderObject );
chainMap.delete( renderObject.getChainArray() );
};
return renderObject;
}
}
/**
* Data structure for the renderer. It is intended to manage
* data of objects in dictionaries.
*
* @private
*/
class DataMap {

	/**
	 * Constructs a new data map.
	 */
	constructor() {

		/**
		 * `DataMap` internally uses a weak map
		 * to manage its data.
		 *
		 * @type {WeakMap}
		 */
		this.data = new WeakMap();

	}

	/**
	 * Returns the dictionary for the given object,
	 * creating an empty one on first access.
	 *
	 * @param {Object} object - The object.
	 * @return {Object} The dictionary.
	 */
	get( object ) {

		if ( this.data.has( object ) === false ) this.data.set( object, {} );

		return this.data.get( object );

	}

	/**
	 * Deletes the dictionary for the given object.
	 *
	 * @param {Object} object - The object.
	 * @return {Object?} The deleted dictionary or `null` if none existed.
	 */
	delete( object ) {

		const dictionary = this.data.get( object );

		if ( dictionary === undefined ) return null;

		this.data.delete( object );

		return dictionary;

	}

	/**
	 * Returns `true` if the given object has a dictionary defined.
	 *
	 * @param {Object} object - The object to test.
	 * @return {Boolean} Whether a dictionary is defined or not.
	 */
	has( object ) {

		return this.data.has( object );

	}

	/**
	 * Frees internal resources.
	 */
	dispose() {

		this.data = new WeakMap();

	}

}
// Describes how a buffer attribute is used on the backend so the matching
// GPU buffer type can be created (see Attributes#update()).
const AttributeType = {
VERTEX: 1,
INDEX: 2,
STORAGE: 3,
INDIRECT: 4
};
// size of a chunk in bytes (STD140 layout)
const GPU_CHUNK_BYTES = 16;
// @TODO: Move to src/constants.js
// blend factors for the constant blend color — values presumably chosen to
// not collide with the blending-factor constants in the core library
const BlendColorFactor = 211;
const OneMinusBlendColorFactor = 212;
/**
* This renderer module manages geometry attributes.
*
* @private
* @augments DataMap
*/
class Attributes extends DataMap {

	/**
	 * Constructs a new attribute management component.
	 *
	 * @param {Backend} backend - The renderer's backend.
	 */
	constructor( backend ) {

		super();

		/**
		 * The renderer's backend.
		 *
		 * @type {Backend}
		 */
		this.backend = backend;

	}

	/**
	 * Deletes the data for the given attribute and destroys the
	 * corresponding backend resource (if one was ever created).
	 *
	 * @param {BufferAttribute} attribute - The attribute.
	 * @return {Object?} The deleted attribute data. `null` if the attribute was unknown.
	 */
	delete( attribute ) {

		const attributeData = super.delete( attribute );

		// DataMap#delete() returns `null` (not `undefined`) when no data exist
		// for the given key. Only ask the backend to destroy the attribute when
		// it was tracked before; otherwise the backend would be asked to free a
		// buffer that was never created.
		if ( attributeData !== null ) {

			this.backend.destroyAttribute( attribute );

		}

		return attributeData;

	}

	/**
	 * Updates the given attribute. This method creates the backend buffer
	 * for new attributes and uploads new data for existing ones.
	 *
	 * @param {BufferAttribute} attribute - The attribute to update.
	 * @param {Number} type - The attribute type (see `AttributeType`).
	 */
	update( attribute, type ) {

		const data = this.get( attribute );

		if ( data.version === undefined ) {

			// first contact: create the matching backend buffer
			if ( type === AttributeType.VERTEX ) {

				this.backend.createAttribute( attribute );

			} else if ( type === AttributeType.INDEX ) {

				this.backend.createIndexAttribute( attribute );

			} else if ( type === AttributeType.STORAGE ) {

				this.backend.createStorageAttribute( attribute );

			} else if ( type === AttributeType.INDIRECT ) {

				this.backend.createIndirectStorageAttribute( attribute );

			}

			data.version = this._getBufferAttribute( attribute ).version;

		} else {

			const bufferAttribute = this._getBufferAttribute( attribute );

			// upload when the data changed or the attribute is flagged for per-frame updates
			if ( data.version < bufferAttribute.version || bufferAttribute.usage === DynamicDrawUsage ) {

				this.backend.updateAttribute( attribute );

				data.version = bufferAttribute.version;

			}

		}

	}

	/**
	 * Utility method for handling interleaved buffer attributes correctly.
	 * To process them, their `InterleavedBuffer` is returned.
	 *
	 * @param {BufferAttribute} attribute - The attribute.
	 * @return {BufferAttribute|InterleavedBuffer}
	 */
	_getBufferAttribute( attribute ) {

		if ( attribute.isInterleavedBufferAttribute ) attribute = attribute.data;

		return attribute;

	}

}
/**
* Returns the wireframe version for the given geometry.
*
* @private
* @function
* @param {BufferGeometry} geometry - The geometry.
* @return {Number} The version.
*/
function getWireframeVersion( geometry ) {

	// The wireframe index is derived from the index buffer or, for non-indexed
	// geometries, from the position attribute — report that source's version.
	const source = ( geometry.index !== null ) ? geometry.index : geometry.attributes.position;

	return source.version;

}
/**
* Returns a wireframe index attribute for the given geometry.
*
* @private
* @function
* @param {BufferGeometry} geometry - The geometry.
* @return {BufferAttribute} The wireframe index attribute.
*/
function getWireframeIndex( geometry ) {

	const lineIndices = [];

	// Each triangle (a,b,c) contributes its three edges as line segments.
	const pushTriangleEdges = ( a, b, c ) => {

		lineIndices.push( a, b, b, c, c, a );

	};

	const geometryIndex = geometry.index;

	if ( geometryIndex !== null ) {

		const array = geometryIndex.array;

		for ( let i = 0, l = array.length; i < l; i += 3 ) {

			pushTriangleEdges( array[ i ], array[ i + 1 ], array[ i + 2 ] );

		}

	} else {

		const array = geometry.attributes.position.array;

		for ( let i = 0, l = ( array.length / 3 ) - 1; i < l; i += 3 ) {

			pushTriangleEdges( i, i + 1, i + 2 );

		}

	}

	// Use a 32 bit index buffer only when the values don't fit into 16 bit.
	const AttributeClass = arrayNeedsUint32( lineIndices ) ? Uint32BufferAttribute : Uint16BufferAttribute;

	const attribute = new AttributeClass( lineIndices, 1 );
	attribute.version = getWireframeVersion( geometry );

	return attribute;

}
/**
* This renderer module manages geometries.
*
* @private
* @augments DataMap
*/
class Geometries extends DataMap {

	/**
	 * Constructs a new geometry management component.
	 *
	 * @param {Attributes} attributes - Renderer component for managing attributes.
	 * @param {Info} info - Renderer component for managing metrics and monitoring data.
	 */
	constructor( attributes, info ) {

		super();

		/**
		 * Renderer component for managing attributes.
		 *
		 * @type {Attributes}
		 */
		this.attributes = attributes;

		/**
		 * Renderer component for managing metrics and monitoring data.
		 *
		 * @type {Info}
		 */
		this.info = info;

		/**
		 * Maps a geometry to its wireframe index attribute.
		 *
		 * @type {WeakMap<BufferGeometry,BufferAttribute>}
		 */
		this.wireframes = new WeakMap();

		/**
		 * Tracks the render call id in which a buffer attribute was last
		 * processed, so each attribute is updated only once per render call.
		 *
		 * @type {WeakMap<BufferAttribute,Number>}
		 */
		this.attributeCall = new WeakMap();

	}

	/**
	 * Returns `true` if the given render object has an initialized geometry.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 * @return {Boolean} Whether if the given render object has an initialized geometry or not.
	 */
	has( renderObject ) {

		const geometry = renderObject.geometry;

		if ( super.has( geometry ) === false ) return false;

		return this.get( geometry ).initialized === true;

	}

	/**
	 * Prepares the geometry of the given render object for rendering.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 */
	updateForRender( renderObject ) {

		if ( this.has( renderObject ) === false ) this.initGeometry( renderObject );

		this.updateAttributes( renderObject );

	}

	/**
	 * Initializes the geometry of the given render object and registers a
	 * dispose listener that frees all associated attribute data.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 */
	initGeometry( renderObject ) {

		const geometry = renderObject.geometry;

		this.get( geometry ).initialized = true;

		this.info.memory.geometries ++;

		const onDispose = () => {

			this.info.memory.geometries --;

			// release the index, all vertex attributes and (if present)
			// the cached wireframe attribute

			if ( geometry.index !== null ) this.attributes.delete( geometry.index );

			for ( const geometryAttribute of renderObject.getAttributes() ) {

				this.attributes.delete( geometryAttribute );

			}

			const wireframeAttribute = this.wireframes.get( geometry );

			if ( wireframeAttribute !== undefined ) this.attributes.delete( wireframeAttribute );

			geometry.removeEventListener( 'dispose', onDispose );

		};

		geometry.addEventListener( 'dispose', onDispose );

	}

	/**
	 * Updates the geometry attributes of the given render object.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 */
	updateAttributes( renderObject ) {

		// vertex and storage attributes

		for ( const attribute of renderObject.getAttributes() ) {

			const isStorage = attribute.isStorageBufferAttribute || attribute.isStorageInstancedBufferAttribute;

			this.updateAttribute( attribute, isStorage ? AttributeType.STORAGE : AttributeType.VERTEX );

		}

		// index

		const index = this.getIndex( renderObject );

		if ( index !== null ) this.updateAttribute( index, AttributeType.INDEX );

		// indirect

		const indirect = renderObject.geometry.indirect;

		if ( indirect !== null ) this.updateAttribute( indirect, AttributeType.INDIRECT );

	}

	/**
	 * Updates the given attribute, making sure it is processed at most once
	 * per render call (including the shared buffer of interleaved attributes).
	 *
	 * @param {BufferAttribute} attribute - The attribute to update.
	 * @param {Number} type - The attribute type.
	 */
	updateAttribute( attribute, type ) {

		const callId = this.info.render.calls;
		const attributeCall = this.attributeCall;

		if ( attribute.isInterleavedBufferAttribute !== true ) {

			// plain attribute: update at most once per render call

			if ( attributeCall.get( attribute ) !== callId ) {

				this.attributes.update( attribute, type );

				attributeCall.set( attribute, callId );

			}

		} else if ( attributeCall.get( attribute ) === undefined ) {

			// interleaved attribute seen for the very first time

			this.attributes.update( attribute, type );

			attributeCall.set( attribute, callId );

		} else if ( attributeCall.get( attribute.data ) !== callId ) {

			// the shared InterleavedBuffer has not been processed in this render call yet

			this.attributes.update( attribute, type );

			attributeCall.set( attribute.data, callId );

			attributeCall.set( attribute, callId );

		}

	}

	/**
	 * Returns the indirect buffer attribute of the given render object.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 * @return {BufferAttribute?} The indirect attribute. `null` if no indirect drawing is used.
	 */
	getIndirect( renderObject ) {

		return renderObject.geometry.indirect;

	}

	/**
	 * Returns the index of the given render object's geometry. For wireframe
	 * materials a (cached) wireframe index attribute is returned instead.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 * @return {BufferAttribute?} The index. Returns `null` for non-indexed geometries.
	 */
	getIndex( renderObject ) {

		const { geometry, material } = renderObject;

		if ( material.wireframe !== true ) return geometry.index;

		const wireframes = this.wireframes;

		let wireframeAttribute = wireframes.get( geometry );

		if ( wireframeAttribute === undefined ) {

			wireframeAttribute = getWireframeIndex( geometry );

			wireframes.set( geometry, wireframeAttribute );

		} else if ( wireframeAttribute.version !== getWireframeVersion( geometry ) ) {

			// the geometry has changed: release the outdated attribute and rebuild it

			this.attributes.delete( wireframeAttribute );

			wireframeAttribute = getWireframeIndex( geometry );

			wireframes.set( geometry, wireframeAttribute );

		}

		return wireframeAttribute;

	}

}
/**
* This renderer module provides a series of statistical information
* about the GPU memory and the rendering process. Useful for debugging
* and monitoring.
*/
class Info {

	/**
	 * Constructs a new info component.
	 */
	constructor() {

		/**
		 * Whether frame related metrics should automatically be reset or not.
		 * Apps which manage their own animation loop should set this property
		 * to `false` and call `renderer.info.reset()` once per frame manually.
		 *
		 * @type {Boolean}
		 * @default true
		 */
		this.autoReset = true;

		/**
		 * The current frame ID. This ID is managed by `NodeFrame`.
		 *
		 * @type {Number}
		 * @readonly
		 * @default 0
		 */
		this.frame = 0;

		/**
		 * The number of render calls since the app has been started.
		 *
		 * @type {Number}
		 * @readonly
		 * @default 0
		 */
		this.calls = 0;

		/**
		 * Render related metrics.
		 *
		 * @type {Object}
		 * @readonly
		 * @property {Number} calls - The number of render calls since the app has been started.
		 * @property {Number} frameCalls - The number of render calls of the current frame.
		 * @property {Number} drawCalls - The number of draw calls of the current frame.
		 * @property {Number} triangles - The number of rendered triangle primitives of the current frame.
		 * @property {Number} points - The number of rendered point primitives of the current frame.
		 * @property {Number} lines - The number of rendered line primitives of the current frame.
		 * @property {Number} timestamp - The timestamp of the frame when using `renderer.renderAsync()`.
		 */
		this.render = {
			calls: 0,
			frameCalls: 0,
			drawCalls: 0,
			triangles: 0,
			points: 0,
			lines: 0,
			timestamp: 0,
		};

		/**
		 * Compute related metrics.
		 *
		 * @type {Object}
		 * @readonly
		 * @property {Number} calls - The number of compute calls since the app has been started.
		 * @property {Number} frameCalls - The number of compute calls of the current frame.
		 * @property {Number} timestamp - The timestamp of the frame when using `renderer.computeAsync()`.
		 */
		this.compute = {
			calls: 0,
			frameCalls: 0,
			timestamp: 0
		};

		/**
		 * Memory related metrics.
		 *
		 * @type {Object}
		 * @readonly
		 * @property {Number} geometries - The number of active geometries.
		 * @property {Number} textures - The number of active textures.
		 */
		this.memory = {
			geometries: 0,
			textures: 0
		};

	}

	/**
	 * This method should be executed per draw call and updates the corresponding metrics.
	 *
	 * @param {Object3D} object - The 3D object that is going to be rendered.
	 * @param {Number} count - The vertex or index count.
	 * @param {Number} instanceCount - The instance count.
	 */
	update( object, count, instanceCount ) {

		this.render.drawCalls ++;

		// the order of these checks matters: line segments must be tested
		// before plain lines

		if ( object.isMesh || object.isSprite ) {

			this.render.triangles += instanceCount * ( count / 3 );

		} else if ( object.isPoints ) {

			this.render.points += instanceCount * count;

		} else if ( object.isLineSegments ) {

			this.render.lines += instanceCount * ( count / 2 );

		} else if ( object.isLine ) {

			this.render.lines += instanceCount * ( count - 1 );

		} else {

			console.error( 'THREE.WebGPUInfo: Unknown object type.' );

		}

	}

	/**
	 * Resets frame related metrics.
	 */
	reset() {

		const render = this.render;

		render.drawCalls = 0;
		render.frameCalls = 0;
		render.triangles = 0;
		render.points = 0;
		render.lines = 0;

		this.compute.frameCalls = 0;

	}

	/**
	 * Performs a complete reset of the object.
	 */
	dispose() {

		this.reset();

		this.calls = 0;

		this.render.calls = 0;
		this.render.timestamp = 0;

		this.compute.calls = 0;
		this.compute.timestamp = 0;

		this.memory.geometries = 0;
		this.memory.textures = 0;

	}

}
/**
* Abstract class for representing pipelines.
*
* @private
* @abstract
*/
class Pipeline {

	/**
	 * Constructs a new pipeline.
	 *
	 * @param {String} cacheKey - The pipeline's cache key.
	 */
	constructor( cacheKey ) {

		/**
		 * The pipeline's cache key.
		 *
		 * @type {String}
		 */
		this.cacheKey = cacheKey;

		/**
		 * Reference counter that tracks how often the pipeline
		 * is currently in use.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.usedTimes = 0;

	}

}
/**
* Class for representing render pipelines.
*
* @private
* @augments Pipeline
*/
class RenderPipeline extends Pipeline {

	/**
	 * Constructs a new render pipeline.
	 *
	 * @param {String} cacheKey - The pipeline's cache key.
	 * @param {ProgrammableStage} vertexProgram - The pipeline's vertex shader.
	 * @param {ProgrammableStage} fragmentProgram - The pipeline's fragment shader.
	 */
	constructor( cacheKey, vertexProgram, fragmentProgram ) {

		super( cacheKey );

		/**
		 * The programmable stage representing the vertex shader.
		 *
		 * @type {ProgrammableStage}
		 */
		this.vertexProgram = vertexProgram;

		/**
		 * The programmable stage representing the fragment shader.
		 *
		 * @type {ProgrammableStage}
		 */
		this.fragmentProgram = fragmentProgram;

	}

}
/**
* Class for representing compute pipelines.
*
* @private
* @augments Pipeline
*/
class ComputePipeline extends Pipeline {

	/**
	 * Constructs a new compute pipeline.
	 *
	 * @param {String} cacheKey - The pipeline's cache key.
	 * @param {ProgrammableStage} computeProgram - The pipeline's compute shader.
	 */
	constructor( cacheKey, computeProgram ) {

		super( cacheKey );

		/**
		 * The programmable stage representing the compute shader.
		 *
		 * @type {ProgrammableStage}
		 */
		this.computeProgram = computeProgram;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isComputePipeline = true;

	}

}
// Monotonically increasing id counter for `ProgrammableStage` instances.
let _id$8 = 0;
/**
* Class for representing programmable stages which are vertex,
* fragment or compute shaders. Unlike fixed-function states (like blending),
* they represent the programmable part of a pipeline.
*
* @private
*/
class ProgrammableStage {

	/**
	 * Constructs a new programmable stage.
	 *
	 * @param {String} code - The shader code.
	 * @param {('vertex'|'fragment'|'compute')} stage - The type of stage.
	 * @param {String} name - The name of the shader.
	 * @param {Array<Object>?} [transforms=null] - The transforms (only relevant for compute stages with WebGL 2 which uses Transform Feedback).
	 * @param {Array<Object>?} [attributes=null] - The attributes (only relevant for compute stages with WebGL 2 which uses Transform Feedback).
	 */
	constructor( code, stage, name, transforms = null, attributes = null ) {

		/**
		 * The unique id of the programmable stage.
		 *
		 * @type {Number}
		 */
		this.id = _id$8 ++;

		/**
		 * The shader code.
		 *
		 * @type {String}
		 */
		this.code = code;

		/**
		 * The type of stage.
		 *
		 * @type {String}
		 */
		this.stage = stage;

		/**
		 * The name of the stage, used for debugging purposes.
		 *
		 * @type {String}
		 */
		this.name = name;

		/**
		 * The transforms (only relevant for compute stages with WebGL 2 which uses Transform Feedback).
		 *
		 * @type {Array<Object>?}
		 */
		this.transforms = transforms;

		/**
		 * The attributes (only relevant for compute stages with WebGL 2 which uses Transform Feedback).
		 *
		 * @type {Array<Object>?}
		 */
		this.attributes = attributes;

		/**
		 * Reference counter that tracks how often the programmable
		 * stage is currently in use.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.usedTimes = 0;

	}

}
/**
* This renderer module manages the pipelines of the renderer.
*
* @private
* @augments DataMap
*/
class Pipelines extends DataMap {
/**
* Constructs a new pipeline management component.
*
* @param {Backend} backend - The renderer's backend.
* @param {Nodes} nodes - Renderer component for managing nodes related logic.
*/
constructor( backend, nodes ) {
super();
/**
* The renderer's backend.
*
* @type {Backend}
*/
this.backend = backend;
/**
* Renderer component for managing nodes related logic.
*
* @type {Nodes}
*/
this.nodes = nodes;
/**
* A reference to the bindings management component.
* This reference will be set inside the `Bindings`
* constructor.
*
* @type {Bindings?}
* @default null
*/
this.bindings = null;
/**
* Internal cache for maintaining pipelines.
* The key of the map is a cache key, the value the pipeline.
*
* @type {Map<String,Pipeline>}
*/
this.caches = new Map();
/**
* This dictionary maintains for each shader stage type (vertex,
* fragment and compute) the programmable stage objects which
* represent the actual shader code.
*
* @type {Object<String,Map>}
*/
this.programs = {
vertex: new Map(),
fragment: new Map(),
compute: new Map()
};
}
/**
* Returns a compute pipeline for the given compute node. Pipelines and
* programs are reference counted and shared via internal caches.
*
* @param {Node} computeNode - The compute node.
* @param {Array<BindGroup>} bindings - The bindings.
* @return {ComputePipeline} The compute pipeline.
*/
getForCompute( computeNode, bindings ) {
const { backend } = this;
const data = this.get( computeNode );
if ( this._needsComputeUpdate( computeNode ) ) {
// the pipeline is going to be replaced, so decrease the usage
// counters of the previous pipeline and its program
const previousPipeline = data.pipeline;
if ( previousPipeline ) {
previousPipeline.usedTimes --;
previousPipeline.computeProgram.usedTimes --;
}
// get shader
const nodeBuilderState = this.nodes.getForCompute( computeNode );
// programmable stage
let stageCompute = this.programs.compute.get( nodeBuilderState.computeShader );
if ( stageCompute === undefined ) {
// release the previous program if this pipeline was its last user
if ( previousPipeline && previousPipeline.computeProgram.usedTimes === 0 ) this._releaseProgram( previousPipeline.computeProgram );
stageCompute = new ProgrammableStage( nodeBuilderState.computeShader, 'compute', computeNode.name, nodeBuilderState.transforms, nodeBuilderState.nodeAttributes );
this.programs.compute.set( nodeBuilderState.computeShader, stageCompute );
backend.createProgram( stageCompute );
}
// determine compute pipeline
const cacheKey = this._getComputeCacheKey( computeNode, stageCompute );
let pipeline = this.caches.get( cacheKey );
if ( pipeline === undefined ) {
// release the previous pipeline if it is no longer used anywhere
if ( previousPipeline && previousPipeline.usedTimes === 0 ) this._releasePipeline( previousPipeline );
pipeline = this._getComputePipeline( computeNode, stageCompute, cacheKey, bindings );
}
// keep track of all used times
pipeline.usedTimes ++;
stageCompute.usedTimes ++;
//
data.version = computeNode.version;
data.pipeline = pipeline;
}
return data.pipeline;
}
/**
* Returns a render pipeline for the given render object. Vertex and fragment
* programs are cached by their shader code and shared between pipelines.
*
* @param {RenderObject} renderObject - The render object.
* @param {Array<Promise>?} [promises=null] - An array of compilation promises which is only relevant in context of `Renderer.compileAsync()`.
* @return {RenderPipeline} The render pipeline.
*/
getForRender( renderObject, promises = null ) {
const { backend } = this;
const data = this.get( renderObject );
if ( this._needsRenderUpdate( renderObject ) ) {
// the pipeline is going to be replaced, so decrease the usage
// counters of the previous pipeline and its programs
const previousPipeline = data.pipeline;
if ( previousPipeline ) {
previousPipeline.usedTimes --;
previousPipeline.vertexProgram.usedTimes --;
previousPipeline.fragmentProgram.usedTimes --;
}
// get shader
const nodeBuilderState = renderObject.getNodeBuilderState();
const name = renderObject.material ? renderObject.material.name : '';
// programmable stages
let stageVertex = this.programs.vertex.get( nodeBuilderState.vertexShader );
if ( stageVertex === undefined ) {
// release the previous vertex program if this pipeline was its last user
if ( previousPipeline && previousPipeline.vertexProgram.usedTimes === 0 ) this._releaseProgram( previousPipeline.vertexProgram );
stageVertex = new ProgrammableStage( nodeBuilderState.vertexShader, 'vertex', name );
this.programs.vertex.set( nodeBuilderState.vertexShader, stageVertex );
backend.createProgram( stageVertex );
}
let stageFragment = this.programs.fragment.get( nodeBuilderState.fragmentShader );
if ( stageFragment === undefined ) {
// release the previous fragment program if this pipeline was its last user
if ( previousPipeline && previousPipeline.fragmentProgram.usedTimes === 0 ) this._releaseProgram( previousPipeline.fragmentProgram );
stageFragment = new ProgrammableStage( nodeBuilderState.fragmentShader, 'fragment', name );
this.programs.fragment.set( nodeBuilderState.fragmentShader, stageFragment );
backend.createProgram( stageFragment );
}
// determine render pipeline
const cacheKey = this._getRenderCacheKey( renderObject, stageVertex, stageFragment );
let pipeline = this.caches.get( cacheKey );
if ( pipeline === undefined ) {
// release the previous pipeline if it is no longer used anywhere
if ( previousPipeline && previousPipeline.usedTimes === 0 ) this._releasePipeline( previousPipeline );
pipeline = this._getRenderPipeline( renderObject, stageVertex, stageFragment, cacheKey, promises );
} else {
// cache hit: make sure the render object references the shared pipeline
renderObject.pipeline = pipeline;
}
// keep track of all used times
pipeline.usedTimes ++;
stageVertex.usedTimes ++;
stageFragment.usedTimes ++;
//
data.pipeline = pipeline;
}
return data.pipeline;
}
/**
* Deletes the pipeline for the given render object and decreases the
* usage counters of the pipeline and its programs.
*
* @param {RenderObject} object - The render object.
* @return {Object?} The deleted dictionary.
*/
delete( object ) {
const pipeline = this.get( object ).pipeline;
if ( pipeline ) {
// pipeline
pipeline.usedTimes --;
if ( pipeline.usedTimes === 0 ) this._releasePipeline( pipeline );
// programs
if ( pipeline.isComputePipeline ) {
pipeline.computeProgram.usedTimes --;
if ( pipeline.computeProgram.usedTimes === 0 ) this._releaseProgram( pipeline.computeProgram );
} else {
pipeline.fragmentProgram.usedTimes --;
pipeline.vertexProgram.usedTimes --;
if ( pipeline.vertexProgram.usedTimes === 0 ) this._releaseProgram( pipeline.vertexProgram );
if ( pipeline.fragmentProgram.usedTimes === 0 ) this._releaseProgram( pipeline.fragmentProgram );
}
}
return super.delete( object );
}
/**
* Frees internal resources by clearing all pipeline and program caches.
*/
dispose() {
super.dispose();
this.caches = new Map();
this.programs = {
vertex: new Map(),
fragment: new Map(),
compute: new Map()
};
}
/**
* Updates the pipeline for the given render object.
*
* @param {RenderObject} renderObject - The render object.
*/
updateForRender( renderObject ) {
this.getForRender( renderObject );
}
/**
* Returns a compute pipeline for the given parameters, creating and
* caching it on demand.
*
* @private
* @param {Node} computeNode - The compute node.
* @param {ProgrammableStage} stageCompute - The programmable stage representing the compute shader.
* @param {String} cacheKey - The cache key.
* @param {Array<BindGroup>} bindings - The bindings.
* @return {ComputePipeline} The compute pipeline.
*/
_getComputePipeline( computeNode, stageCompute, cacheKey, bindings ) {
// check for existing pipeline
cacheKey = cacheKey || this._getComputeCacheKey( computeNode, stageCompute );
let pipeline = this.caches.get( cacheKey );
if ( pipeline === undefined ) {
pipeline = new ComputePipeline( cacheKey, stageCompute );
this.caches.set( cacheKey, pipeline );
this.backend.createComputePipeline( pipeline, bindings );
}
return pipeline;
}
/**
* Returns a render pipeline for the given parameters, creating and
* caching it on demand.
*
* @private
* @param {RenderObject} renderObject - The render object.
* @param {ProgrammableStage} stageVertex - The programmable stage representing the vertex shader.
* @param {ProgrammableStage} stageFragment - The programmable stage representing the fragment shader.
* @param {String} cacheKey - The cache key.
* @param {Array<Promise>?} promises - An array of compilation promises which is only relevant in context of `Renderer.compileAsync()`.
* @return {RenderPipeline} The render pipeline.
*/
_getRenderPipeline( renderObject, stageVertex, stageFragment, cacheKey, promises ) {
// check for existing pipeline
cacheKey = cacheKey || this._getRenderCacheKey( renderObject, stageVertex, stageFragment );
let pipeline = this.caches.get( cacheKey );
if ( pipeline === undefined ) {
pipeline = new RenderPipeline( cacheKey, stageVertex, stageFragment );
this.caches.set( cacheKey, pipeline );
renderObject.pipeline = pipeline;
// The `promises` array is `null` by default and only set to an empty array when
// `Renderer.compileAsync()` is used. The next call actually fills the array with
// pending promises that resolve when the render pipelines are ready for rendering.
this.backend.createRenderPipeline( renderObject, promises );
}
return pipeline;
}
/**
* Computes a cache key representing a compute pipeline.
*
* @private
* @param {Node} computeNode - The compute node.
* @param {ProgrammableStage} stageCompute - The programmable stage representing the compute shader.
* @return {String} The cache key.
*/
_getComputeCacheKey( computeNode, stageCompute ) {
return computeNode.id + ',' + stageCompute.id;
}
/**
* Computes a cache key representing a render pipeline.
*
* @private
* @param {RenderObject} renderObject - The render object.
* @param {ProgrammableStage} stageVertex - The programmable stage representing the vertex shader.
* @param {ProgrammableStage} stageFragment - The programmable stage representing the fragment shader.
* @return {String} The cache key.
*/
_getRenderCacheKey( renderObject, stageVertex, stageFragment ) {
return stageVertex.id + ',' + stageFragment.id + ',' + this.backend.getRenderCacheKey( renderObject );
}
/**
* Releases the given pipeline by removing it from the internal cache.
*
* @private
* @param {Pipeline} pipeline - The pipeline to release.
*/
_releasePipeline( pipeline ) {
this.caches.delete( pipeline.cacheKey );
}
/**
* Releases the shader program by removing it from the per-stage program cache.
*
* @private
* @param {Object} program - The shader program to release.
*/
_releaseProgram( program ) {
const code = program.code;
const stage = program.stage;
this.programs[ stage ].delete( code );
}
/**
* Returns `true` if the compute pipeline for the given compute node requires an update.
*
* @private
* @param {Node} computeNode - The compute node.
* @return {Boolean} Whether the compute pipeline for the given compute node requires an update or not.
*/
_needsComputeUpdate( computeNode ) {
const data = this.get( computeNode );
return data.pipeline === undefined || data.version !== computeNode.version;
}
/**
* Returns `true` if the render pipeline for the given render object requires an update.
*
* @private
* @param {RenderObject} renderObject - The render object.
* @return {Boolean} Whether the render pipeline for the given render object requires an update or not.
*/
_needsRenderUpdate( renderObject ) {
const data = this.get( renderObject );
return data.pipeline === undefined || this.backend.needsRenderUpdate( renderObject );
}
}
/**
* This renderer module manages the bindings of the renderer.
*
* @private
* @augments DataMap
*/
class Bindings extends DataMap {
/**
* Constructs a new bindings management component.
*
* @param {Backend} backend - The renderer's backend.
* @param {Nodes} nodes - Renderer component for managing nodes related logic.
* @param {Textures} textures - Renderer component for managing textures.
* @param {Attributes} attributes - Renderer component for managing attributes.
* @param {Pipelines} pipelines - Renderer component for managing pipelines.
* @param {Info} info - Renderer component for managing metrics and monitoring data.
*/
constructor( backend, nodes, textures, attributes, pipelines, info ) {
super();
/**
* The renderer's backend.
*
* @type {Backend}
*/
this.backend = backend;
/**
* Renderer component for managing textures.
*
* @type {Textures}
*/
this.textures = textures;
/**
* Renderer component for managing pipelines.
*
* @type {Pipelines}
*/
this.pipelines = pipelines;
/**
* Renderer component for managing attributes.
*
* @type {Attributes}
*/
this.attributes = attributes;
/**
* Renderer component for managing nodes related logic.
*
* @type {Nodes}
*/
this.nodes = nodes;
/**
* Renderer component for managing metrics and monitoring data.
*
* @type {Info}
*/
this.info = info;
this.pipelines.bindings = this; // assign bindings to pipelines
}
/**
* Returns the bind groups for the given render object, creating the
* backend resources for each bind group on first use.
*
* @param {RenderObject} renderObject - The render object.
* @return {Array<BindGroup>} The bind groups.
*/
getForRender( renderObject ) {
const bindings = renderObject.getBindings();
for ( const bindGroup of bindings ) {
const groupData = this.get( bindGroup );
if ( groupData.bindGroup === undefined ) {
// each object defines an array of bindings (ubos, textures, samplers etc.)
this._init( bindGroup );
this.backend.createBindings( bindGroup, bindings, 0 );
groupData.bindGroup = bindGroup;
}
}
return bindings;
}
/**
* Returns the bind groups for the given compute node, creating the
* backend resources for each bind group on first use.
*
* @param {Node} computeNode - The compute node.
* @return {Array<BindGroup>} The bind groups.
*/
getForCompute( computeNode ) {
const bindings = this.nodes.getForCompute( computeNode ).bindings;
for ( const bindGroup of bindings ) {
const groupData = this.get( bindGroup );
if ( groupData.bindGroup === undefined ) {
this._init( bindGroup );
this.backend.createBindings( bindGroup, bindings, 0 );
groupData.bindGroup = bindGroup;
}
}
return bindings;
}
/**
* Updates the bindings for the given compute node.
*
* @param {Node} computeNode - The compute node.
*/
updateForCompute( computeNode ) {
this._updateBindings( this.getForCompute( computeNode ) );
}
/**
* Updates the bindings for the given render object.
*
* @param {RenderObject} renderObject - The render object.
*/
updateForRender( renderObject ) {
this._updateBindings( this.getForRender( renderObject ) );
}
/**
* Updates the given array of bindings, one bind group at a time.
*
* @param {Array<BindGroup>} bindings - The bind groups.
*/
_updateBindings( bindings ) {
for ( const bindGroup of bindings ) {
this._update( bindGroup, bindings );
}
}
/**
* Initializes the given bind group by preparing the resources (textures,
* storage buffers) its bindings refer to.
*
* @param {BindGroup} bindGroup - The bind group to initialize.
*/
_init( bindGroup ) {
for ( const binding of bindGroup.bindings ) {
if ( binding.isSampledTexture ) {
this.textures.updateTexture( binding.texture );
} else if ( binding.isStorageBuffer ) {
const attribute = binding.attribute;
const attributeType = attribute.isIndirectStorageBufferAttribute ? AttributeType.INDIRECT : AttributeType.STORAGE;
this.attributes.update( attribute, attributeType );
}
}
}
/**
* Updates the given bind group: uploads changed uniform buffers, refreshes
* sampled textures and recreates the backend bind group when required.
*
* @param {BindGroup} bindGroup - The bind group to update.
* @param {Array<BindGroup>} bindings - The bind groups.
*/
_update( bindGroup, bindings ) {
const { backend } = this;
let needsBindingsUpdate = false;
let cacheBindings = true;
let cacheIndex = 0;
let version = 0;
// iterate over all bindings and check if buffer updates or a new binding group is required
for ( const binding of bindGroup.bindings ) {
if ( binding.isNodeUniformsGroup ) {
const updated = this.nodes.updateGroup( binding );
// every uniforms group is a uniform buffer. So if no update is required,
// we move on to the next binding. Otherwise the next if block will update the group.
if ( updated === false ) continue;
}
if ( binding.isUniformBuffer ) {
const updated = binding.update();
if ( updated ) {
backend.updateBinding( binding );
}
} else if ( binding.isSampler ) {
binding.update();
} else if ( binding.isSampledTexture ) {
const texturesTextureData = this.textures.get( binding.texture );
if ( binding.needsBindingsUpdate( texturesTextureData.generation ) ) needsBindingsUpdate = true;
const updated = binding.update();
const texture = binding.texture;
if ( updated ) {
this.textures.updateTexture( texture );
}
const textureData = backend.get( texture );
if ( textureData.externalTexture !== undefined || texturesTextureData.isDefaultTexture ) {
// external/default textures can change outside this module, so the
// resulting bind group must not be cached
cacheBindings = false;
} else {
// NOTE(review): cacheIndex/version fold texture ids and versions into the
// numeric values passed to `updateBindings()` — presumably a bind group
// cache key; confirm against the backend implementation.
cacheIndex = cacheIndex * 10 + texture.id;
version += texture.version;
}
if ( backend.isWebGPUBackend === true && textureData.texture === undefined && textureData.externalTexture === undefined ) {
// TODO: Remove this once we found why updated === false isn't bound to a texture in the WebGPU backend
console.error( 'Bindings._update: binding should be available:', binding, updated, texture, binding.textureNode.value, needsBindingsUpdate );
this.textures.updateTexture( texture );
needsBindingsUpdate = true;
}
if ( texture.isStorageTexture === true ) {
const textureData = this.get( texture );
if ( binding.store === true ) {
// the texture was written to, so mipmaps must be regenerated before sampling
textureData.needsMipmap = true;
} else if ( this.textures.needsMipmaps( texture ) && textureData.needsMipmap === true ) {
this.backend.generateMipmaps( texture );
textureData.needsMipmap = false;
}
}
}
}
if ( needsBindingsUpdate === true ) {
this.backend.updateBindings( bindGroup, bindings, cacheBindings ? cacheIndex : 0, version );
}
}
}
/**
* Default sorting function for opaque render items.
*
* @private
* @function
* @param {Object} a - The first render item.
* @param {Object} b - The second render item.
* @return {Number} A numeric value which defines the sort order.
*/
function painterSortStable( a, b ) {

	// Sort priority: group order, render order, material, depth (front-to-back),
	// and finally the insertion id to keep the sort stable.

	if ( a.groupOrder !== b.groupOrder ) return a.groupOrder - b.groupOrder;

	if ( a.renderOrder !== b.renderOrder ) return a.renderOrder - b.renderOrder;

	if ( a.material.id !== b.material.id ) return a.material.id - b.material.id;

	if ( a.z !== b.z ) return a.z - b.z;

	return a.id - b.id;

}
/**
* Default sorting function for transparent render items.
*
* @private
* @function
* @param {Object} a - The first render item.
* @param {Object} b - The second render item.
* @return {Number} A numeric value which defines the sort order.
*/
function reversePainterSortStable( a, b ) {

	// Same ordering as the opaque sort, but depth is compared back-to-front
	// so transparent objects blend correctly.

	if ( a.groupOrder !== b.groupOrder ) return a.groupOrder - b.groupOrder;

	if ( a.renderOrder !== b.renderOrder ) return a.renderOrder - b.renderOrder;

	if ( a.z !== b.z ) return b.z - a.z;

	return a.id - b.id;

}
/**
* Returns `true` if the given transparent material requires a double pass.
*
* @private
* @function
* @param {Material} material - The transparent material.
* @return {Boolean} Whether the given material requires a double pass or not.
*/
function needsDoublePass( material ) {

	// Transmissive, double-sided materials are rendered in two passes
	// unless the user explicitly opted out via `forceSinglePass`.
	const transmissive = material.transmission > 0 || material.transmissionNode;

	return transmissive && material.side === DoubleSide && material.forceSinglePass === false;

}
/**
* When the renderer analyzes the scene at the beginning of a render call,
* it stores 3D objects for further processing in render lists. Depending on the
* properties of the 3D objects (like their transformation or material state), the
* objects are maintained in ordered lists for the actual rendering.
*
* Render lists are unique per scene and camera combination.
*
* @private
*/
class RenderList {
/**
* Constructs a render list.
*
* @param {Lighting} lighting - The lighting management component.
* @param {Scene} scene - The scene.
* @param {Camera} camera - The camera the scene is rendered with.
*/
constructor( lighting, scene, camera ) {
/**
* 3D objects are transformed into render items and stored in this array.
*
* @type {Array<Object>}
*/
this.renderItems = [];
/**
* The current render items index.
*
* @type {Number}
* @default 0
*/
this.renderItemsIndex = 0;
/**
* A list with opaque render items.
*
* @type {Array<Object>}
*/
this.opaque = [];
/**
* A list with transparent render items which require
* double pass rendering (e.g. transmissive objects).
*
* @type {Array<Object>}
*/
this.transparentDoublePass = [];
/**
* A list with transparent render items.
*
* @type {Array<Object>}
*/
this.transparent = [];
/**
* A list with transparent render bundle data.
*
* @type {Array<Object>}
*/
this.bundles = [];
/**
* The render list's lights node. This node is later
* relevant for the actual analytical light nodes which
* compute the scene's lighting in the shader.
*
* @type {LightsNode}
*/
this.lightsNode = lighting.getNode( scene, camera );
/**
* The scene's lights stored in an array. This array
* is used to setup the lights node.
*
* @type {Array<Light>}
*/
this.lightsArray = [];
/**
* The scene.
*
* @type {Scene}
*/
this.scene = scene;
/**
* The camera the scene is rendered with.
*
* @type {Camera}
*/
this.camera = camera;
/**
* How many objects perform occlusion query tests.
*
* @type {Number}
* @default 0
*/
this.occlusionQueryCount = 0;
}
/**
* This method is called right at the beginning of a render call
* before the scene is analyzed. It prepares the internal data
* structures for the upcoming render lists generation.
*
* @return {RenderList} A reference to this render list.
*/
begin() {
this.renderItemsIndex = 0;
this.opaque.length = 0;
this.transparentDoublePass.length = 0;
this.transparent.length = 0;
this.bundles.length = 0;
this.lightsArray.length = 0;
this.occlusionQueryCount = 0;
return this;
}
/**
* Returns a render item for the giving render item state. The state is defined
* by a series of object-related parameters.
*
* The method avoids object creation by holding render items and reusing them in
* subsequent render calls (just with different property values).
*
* @param {Object3D} object - The 3D object.
* @param {BufferGeometry} geometry - The 3D object's geometry.
* @param {Material} material - The 3D object's material.
* @param {Number} groupOrder - The current group order.
* @param {Number} z - Th 3D object's depth value (z value in clip space).
* @param {Number?} group - {Object?} group - Only relevant for objects using multiple materials. This represents a group entry from the respective `BufferGeometry`.
* @param {ClippingContext} clippingContext - The current clipping context.
* @return {Object} The render item.
*/
getNextRenderItem( object, geometry, material, groupOrder, z, group, clippingContext ) {
let renderItem = this.renderItems[ this.renderItemsIndex ];
if ( renderItem === undefined ) {
renderItem = {
id: object.id,
object: object,
geometry: geometry,
material: material,
groupOrder: groupOrder,
renderOrder: object.renderOrder,
z: z,
group: group,
clippingContext: clippingContext
};
this.renderItems[ this.renderItemsIndex ] = renderItem;
} else {
renderItem.id = object.id;
renderItem.object = object;
renderItem.geometry = geometry;
renderItem.material = material;
renderItem.groupOrder = groupOrder;
renderItem.renderOrder = object.renderOrder;
renderItem.z = z;
renderItem.group = group;
renderItem.clippingContext = clippingContext;
}
this.renderItemsIndex ++;
return renderItem;
}
/**
* Pushes the given object as a render item to the internal render lists.
* The selected lists depend on the object properties.
*
* @param {Object3D} object - The 3D object.
* @param {BufferGeometry} geometry - The 3D object's geometry.
* @param {Material} material - The 3D object's material.
* @param {Number} groupOrder - The current group order.
* @param {Number} z - Th 3D object's depth value (z value in clip space).
* @param {Number?} group - {Object?} group - Only relevant for objects using multiple materials. This represents a group entry from the respective `BufferGeometry`.
* @param {ClippingContext} clippingContext - The current clipping context.
*/
push( object, geometry, material, groupOrder, z, group, clippingContext ) {
const renderItem = this.getNextRenderItem( object, geometry, material, groupOrder, z, group, clippingContext );
if ( object.occlusionTest === true ) this.occlusionQueryCount ++;
if ( material.transparent === true || material.transmission > 0 ) {
if ( needsDoublePass( material ) ) this.transparentDoublePass.push( renderItem );
this.transparent.push( renderItem );
} else {
this.opaque.push( renderItem );
}
}
/**
* Inserts the given object as a render item at the start of the internal render lists.
* The selected lists depend on the object properties.
*
* @param {Object3D} object - The 3D object.
* @param {BufferGeometry} geometry - The 3D object's geometry.
* @param {Material} material - The 3D object's material.
* @param {Number} groupOrder - The current group order.
* @param {Number} z - Th 3D object's depth value (z value in clip space).
* @param {Number?} group - {Object?} group - Only relevant for objects using multiple materials. This represents a group entry from the respective `BufferGeometry`.
* @param {ClippingContext} clippingContext - The current clipping context.
*/
unshift( object, geometry, material, groupOrder, z, group, clippingContext ) {
const renderItem = this.getNextRenderItem( object, geometry, material, groupOrder, z, group, clippingContext );
if ( material.transparent === true || material.transmission > 0 ) {
if ( needsDoublePass( material ) ) this.transparentDoublePass.unshift( renderItem );
this.transparent.unshift( renderItem );
} else {
this.opaque.unshift( renderItem );
}
}
/**
* Pushes render bundle group data into the render list.
*
* @param {Object} group - Bundle group data.
*/
pushBundle( group ) {
this.bundles.push( group );
}
/**
* Pushes a light into the render list.
*
* @param {Light} light - The light.
*/
pushLight( light ) {
this.lightsArray.push( light );
}
/**
* Sorts the internal render lists.
*
* @param {function(Any, Any): Number} customOpaqueSort - A custom sort function for opaque objects.
* @param {function(Any, Any): Number} customTransparentSort - A custom sort function for transparent objects.
*/
sort( customOpaqueSort, customTransparentSort ) {
if ( this.opaque.length > 1 ) this.opaque.sort( customOpaqueSort || painterSortStable );
if ( this.transparentDoublePass.length > 1 ) this.transparentDoublePass.sort( customTransparentSort || reversePainterSortStable );
if ( this.transparent.length > 1 ) this.transparent.sort( customTransparentSort || reversePainterSortStable );
}
/**
* This method performs finalizing tasks right after the render lists
* have been generated.
*/
finish() {
// update lights
this.lightsNode.setLights( this.lightsArray );
// Clear references from inactive renderItems in the list
for ( let i = this.renderItemsIndex, il = this.renderItems.length; i < il; i ++ ) {
const renderItem = this.renderItems[ i ];
if ( renderItem.id === null ) break;
renderItem.id = null;
renderItem.object = null;
renderItem.geometry = null;
renderItem.material = null;
renderItem.groupOrder = null;
renderItem.renderOrder = null;
renderItem.z = null;
renderItem.group = null;
renderItem.clippingContext = null;
}
}
}
// reusable key array for chain map lookups (filled per call, then reset)
const _chainKeys$4 = [];
/**
* This renderer module manages the render lists which are unique
* per scene and camera combination.
*
* @private
*/
class RenderLists {

	/**
	 * Constructs a render lists management component.
	 *
	 * @param {Lighting} lighting - The lighting management component.
	 */
	constructor( lighting ) {

		/**
		 * The lighting management component.
		 *
		 * @type {Lighting}
		 */
		this.lighting = lighting;

		/**
		 * Chain map storing one render list per scene/camera pair.
		 *
		 * @type {ChainMap}
		 */
		this.lists = new ChainMap();

	}

	/**
	 * Returns the render list for the given scene and camera,
	 * creating it on first access.
	 *
	 * @param {Scene} scene - The scene.
	 * @param {Camera} camera - The camera.
	 * @return {RenderList} The render list.
	 */
	get( scene, camera ) {

		_chainKeys$4[ 0 ] = scene;
		_chainKeys$4[ 1 ] = camera;

		let list = this.lists.get( _chainKeys$4 );

		if ( list === undefined ) {

			list = new RenderList( this.lighting, scene, camera );
			this.lists.set( _chainKeys$4, list );

		}

		_chainKeys$4.length = 0;

		return list;

	}

	/**
	 * Frees all internal resources.
	 */
	dispose() {

		this.lists = new ChainMap();

	}

}
// monotonically increasing id counter for RenderContext instances
let _id$7 = 0;
/**
* Any render or compute command is executed in a specific context that defines
* the state of the renderer and its backend. Typical examples for such context
* data are the current clear values or data from the active framebuffer. This
* module is used to represent these contexts as objects.
*
* @private
*/
class RenderContext {
/**
* Constructs a new render context.
*/
constructor() {
/**
* The context's ID.
*
* @type {Number}
*/
this.id = _id$7 ++;
/**
* Whether the current active framebuffer has a color attachment.
*
* @type {Boolean}
* @default true
*/
this.color = true;
/**
* Whether the color attachment should be cleared or not.
*
* @type {Boolean}
* @default true
*/
this.clearColor = true;
/**
* The clear color value.
*
* @type {Object}
*/
this.clearColorValue = { r: 0, g: 0, b: 0, a: 1 };
/**
* Whether the current active framebuffer has a depth attachment.
*
* @type {Boolean}
* @default true
*/
this.depth = true;
/**
* Whether the depth attachment should be cleared or not.
*
* @type {Boolean}
* @default true
*/
this.clearDepth = true;
/**
* The clear depth value.
*
* @type {Number}
* @default 1
*/
this.clearDepthValue = 1;
/**
* Whether the current active framebuffer has a stencil attachment.
*
* @type {Boolean}
* @default false
*/
this.stencil = false;
/**
* Whether the stencil attachment should be cleared or not.
*
* @type {Boolean}
* @default true
*/
this.clearStencil = true;
/**
* The clear stencil value.
*
* @type {Number}
* @default 1
*/
this.clearStencilValue = 1;
/**
* By default the viewport encloses the entire framebuffer. If a smaller
* viewport is manually defined, this property is set to `true` by the renderer.
*
* @type {Boolean}
* @default false
*/
this.viewport = false;
/**
* The viewport value. This value is in physical pixels meaning it incorporates
* the renderer's pixel ratio. The viewport property of render targets or
* the renderer is in logical pixels.
*
* @type {Vector4}
*/
this.viewportValue = new Vector4();
/**
* When the scissor test is active and the scissor rectangle is smaller than the
* framebuffer's dimensions, this property is set to `true` by the renderer.
*
* @type {Boolean}
* @default false
*/
this.scissor = false;
/**
* The scissor rectangle.
*
* @type {Vector4}
*/
this.scissorValue = new Vector4();
/**
* The active render target.
*
* @type {RenderTarget?}
* @default null
*/
this.renderTarget = null;
/**
* The textures of the active render target.
* `null` when no render target is set.
*
* @type {Array<Texture>?}
* @default null
*/
this.textures = null;
/**
* The depth texture of the active render target.
* `null` when no render target is set.
*
* @type {DepthTexture?}
* @default null
*/
this.depthTexture = null;
/**
* The active cube face.
*
* @type {Number}
* @default 0
*/
this.activeCubeFace = 0;
/**
* The active mipmap level.
*
* @type {Number}
* @default 0
*/
this.activeMipmapLevel = 0;
/**
* The number of MSAA samples. This value is always `1` when
* MSAA isn't used.
*
* @type {Number}
* @default 1
*/
this.sampleCount = 1;
/**
* The active render target's width in physical pixels.
*
* @type {Number}
* @default 0
*/
this.width = 0;
/**
* The active render target's height in physical pixels.
*
* @type {Number}
* @default 0
*/
this.height = 0;
/**
* The occlusion query count.
*
* @type {Number}
* @default 0
*/
this.occlusionQueryCount = 0;
/**
* The current clipping context.
*
* @type {ClippingContext?}
* @default null
*/
this.clippingContext = null;
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isRenderContext = true;
}
/**
* Returns the cache key of this render context.
*
* @return {Number} The cache key.
*/
getCacheKey() {
return getCacheKey( this );
}
}
/**
* Computes a cache key for the given render context. This key
* should identify the render target state so it is possible to
* configure the correct attachments in the respective backend.
*
* @param {RenderContext} renderContext - The render context.
* @return {Number} The cache key.
*/
function getCacheKey( renderContext ) {

	const { textures, activeCubeFace } = renderContext;

	// the key is derived from the active cube face and the ids of all color attachments

	return hashArray( [ activeCubeFace, ...textures.map( ( texture ) => texture.id ) ] );

}
// reusable key array for render context lookups (filled per call, then reset)
const _chainKeys$3 = [];
// placeholder scene/camera for clear-only render contexts, see RenderContexts#getForClear()
const _defaultScene = /*@__PURE__*/ new Scene();
const _defaultCamera = /*@__PURE__*/ new Camera();
/**
* This module manages the render contexts of the renderer.
*
* @private
*/
class RenderContexts {

	/**
	 * Constructs a new render context management component.
	 */
	constructor() {

		/**
		 * A dictionary that manages render contexts in chain maps
		 * for each attachment state.
		 *
		 * @type {Object<String,ChainMap>}
		 */
		this.chainMaps = {};

	}

	/**
	 * Returns a render context for the given scene, camera and render target.
	 *
	 * @param {Scene} scene - The scene.
	 * @param {Camera} camera - The camera that is used to render the scene.
	 * @param {RenderTarget?} [renderTarget=null] - The active render target.
	 * @return {RenderContext} The render context.
	 */
	get( scene, camera, renderTarget = null ) {

		// contexts are grouped by the attachment configuration of the render target

		let attachmentState = 'default';

		if ( renderTarget !== null ) {

			const { textures, samples, depthBuffer, stencilBuffer } = renderTarget;

			attachmentState = `${ textures.length }:${ renderTarget.texture.format }:${ samples }:${ depthBuffer }:${ stencilBuffer }`;

		}

		const chainMap = this._getChainMap( attachmentState );

		_chainKeys$3[ 0 ] = scene;
		_chainKeys$3[ 1 ] = camera;

		let renderState = chainMap.get( _chainKeys$3 );

		if ( renderState === undefined ) {

			renderState = new RenderContext();
			chainMap.set( _chainKeys$3, renderState );

		}

		_chainKeys$3.length = 0;

		// a sample value of 0 means MSAA is disabled, so normalize to a count of 1

		if ( renderTarget !== null ) renderState.sampleCount = renderTarget.samples === 0 ? 1 : renderTarget.samples;

		return renderState;

	}

	/**
	 * Returns a render context intended for clear operations.
	 *
	 * @param {RenderTarget?} [renderTarget=null] - The active render target.
	 * @return {RenderContext} The render context.
	 */
	getForClear( renderTarget = null ) {

		return this.get( _defaultScene, _defaultCamera, renderTarget );

	}

	/**
	 * Returns the chain map for the given attachment state,
	 * creating it on first access.
	 *
	 * @private
	 * @param {String} attachmentState - The attachment state.
	 * @return {ChainMap} The chain map.
	 */
	_getChainMap( attachmentState ) {

		return this.chainMaps[ attachmentState ] || ( this.chainMaps[ attachmentState ] = new ChainMap() );

	}

	/**
	 * Frees internal resources.
	 */
	dispose() {

		this.chainMaps = {};

	}

}
// shared default target vector for Textures#getSize()
const _size$3 = /*@__PURE__*/ new Vector3();
/**
* This module manages the textures of the renderer.
*
* @private
* @augments DataMap
*/
class Textures extends DataMap {
/**
* Constructs a new texture management component.
*
* @param {Renderer} renderer - The renderer.
* @param {Backend} backend - The renderer's backend.
* @param {Info} info - Renderer component for managing metrics and monitoring data.
*/
constructor( renderer, backend, info ) {
super();
/**
* The renderer.
*
* @type {Renderer}
*/
this.renderer = renderer;
/**
* The backend.
*
* @type {Backend}
*/
this.backend = backend;
/**
* Renderer component for managing metrics and monitoring data.
*
* @type {Info}
*/
this.info = info;
}
/**
* Updates the given render target. Based on the given render target configuration,
* it updates the texture states representing the attachments of the framebuffer.
*
* @param {RenderTarget} renderTarget - The render target to update.
* @param {Number} [activeMipmapLevel=0] - The active mipmap level.
*/
updateRenderTarget( renderTarget, activeMipmapLevel = 0 ) {
const renderTargetData = this.get( renderTarget );
// a sample value of 0 means MSAA is disabled, so normalize to a count of 1
const sampleCount = renderTarget.samples === 0 ? 1 : renderTarget.samples;
// lazily created cache of internal depth textures, keyed by mipmap level
const depthTextureMips = renderTargetData.depthTextureMips || ( renderTargetData.depthTextureMips = {} );
const textures = renderTarget.textures;
const size = this.getSize( textures[ 0 ] );
// dimensions of the active mipmap level
const mipWidth = size.width >> activeMipmapLevel;
const mipHeight = size.height >> activeMipmapLevel;
// prefer an explicitly assigned depth texture over the internal one
let depthTexture = renderTarget.depthTexture || depthTextureMips[ activeMipmapLevel ];
const useDepthTexture = renderTarget.depthBuffer === true || renderTarget.stencilBuffer === true;
let textureNeedsUpdate = false;
// create an internal depth(-stencil) texture on demand when the render
// target requests a depth/stencil buffer but defines no depth texture
if ( depthTexture === undefined && useDepthTexture ) {
depthTexture = new DepthTexture();
depthTexture.format = renderTarget.stencilBuffer ? DepthStencilFormat : DepthFormat;
depthTexture.type = renderTarget.stencilBuffer ? UnsignedInt248Type : UnsignedIntType; // FloatType
depthTexture.image.width = mipWidth;
depthTexture.image.height = mipHeight;
depthTextureMips[ activeMipmapLevel ] = depthTexture;
}
// a size change invalidates the attachments
if ( renderTargetData.width !== size.width || size.height !== renderTargetData.height ) {
textureNeedsUpdate = true;
if ( depthTexture ) {
depthTexture.needsUpdate = true;
depthTexture.image.width = mipWidth;
depthTexture.image.height = mipHeight;
}
}
renderTargetData.width = size.width;
renderTargetData.height = size.height;
renderTargetData.textures = textures;
renderTargetData.depthTexture = depthTexture || null;
renderTargetData.depth = renderTarget.depthBuffer;
renderTargetData.stencil = renderTarget.stencilBuffer;
renderTargetData.renderTarget = renderTarget;
// a changed sample count also invalidates the attachments
if ( renderTargetData.sampleCount !== sampleCount ) {
textureNeedsUpdate = true;
if ( depthTexture ) {
depthTexture.needsUpdate = true;
}
renderTargetData.sampleCount = sampleCount;
}
//
const options = { sampleCount };
// XR render targets require no texture updates
if ( renderTarget.isXRRenderTarget !== true ) {
for ( let i = 0; i < textures.length; i ++ ) {
const texture = textures[ i ];
if ( textureNeedsUpdate ) texture.needsUpdate = true;
this.updateTexture( texture, options );
}
if ( depthTexture ) {
this.updateTexture( depthTexture, options );
}
}
// dispose handler
if ( renderTargetData.initialized !== true ) {
renderTargetData.initialized = true;
// dispose
const onDispose = () => {
renderTarget.removeEventListener( 'dispose', onDispose );
for ( let i = 0; i < textures.length; i ++ ) {
this._destroyTexture( textures[ i ] );
}
if ( depthTexture ) {
this._destroyTexture( depthTexture );
}
this.delete( renderTarget );
};
renderTarget.addEventListener( 'dispose', onDispose );
}
}
/**
* Updates the given texture. Depending on the texture state, this method
* triggers the upload of texture data to the GPU memory. If the texture data are
* not yet ready for the upload, it uses default texture data as a placeholder.
*
* @param {Texture} texture - The texture to update.
* @param {Object} [options={}] - The options.
*/
updateTexture( texture, options = {} ) {
const textureData = this.get( texture );
// nothing to do when the texture is initialized and up-to-date
if ( textureData.initialized === true && textureData.version === texture.version ) return;
const isRenderTarget = texture.isRenderTargetTexture || texture.isDepthTexture || texture.isFramebufferTexture;
const backend = this.backend;
if ( isRenderTarget && textureData.initialized === true ) {
// it's an update
backend.destroySampler( texture );
backend.destroyTexture( texture );
}
//
// framebuffer textures inherit their type from the active render target
// (or fall back to unsigned byte when rendering to the canvas)
if ( texture.isFramebufferTexture ) {
const renderTarget = this.renderer.getRenderTarget();
if ( renderTarget ) {
texture.type = renderTarget.texture.type;
} else {
texture.type = UnsignedByteType;
}
}
//
const { width, height, depth } = this.getSize( texture );
options.width = width;
options.height = height;
options.depth = depth;
options.needsMipmaps = this.needsMipmaps( texture );
options.levels = options.needsMipmaps ? this.getMipLevels( texture, width, height ) : 1;
//
if ( isRenderTarget || texture.isStorageTexture === true ) {
backend.createSampler( texture );
backend.createTexture( texture, options );
textureData.generation = texture.version;
} else {
const needsCreate = textureData.initialized !== true;
if ( needsCreate ) backend.createSampler( texture );
if ( texture.version > 0 ) {
const image = texture.image;
if ( image === undefined ) {
console.warn( 'THREE.Renderer: Texture marked for update but image is undefined.' );
} else if ( image.complete === false ) {
console.warn( 'THREE.Renderer: Texture marked for update but image is incomplete.' );
} else {
if ( texture.images ) {
const images = [];
for ( const image of texture.images ) {
images.push( image );
}
options.images = images;
} else {
options.image = image;
}
// (re)create the GPU texture when it does not exist yet or when only
// a placeholder (default texture) has been created so far
if ( textureData.isDefaultTexture === undefined || textureData.isDefaultTexture === true ) {
backend.createTexture( texture, options );
textureData.isDefaultTexture = false;
textureData.generation = texture.version;
}
if ( texture.source.dataReady === true ) backend.updateTexture( texture, options );
if ( options.needsMipmaps && texture.mipmaps.length === 0 ) backend.generateMipmaps( texture );
}
} else {
// async update
backend.createDefaultTexture( texture );
textureData.isDefaultTexture = true;
textureData.generation = texture.version;
}
}
// dispose handler
if ( textureData.initialized !== true ) {
textureData.initialized = true;
textureData.generation = texture.version;
//
this.info.memory.textures ++;
// dispose
const onDispose = () => {
texture.removeEventListener( 'dispose', onDispose );
this._destroyTexture( texture );
this.info.memory.textures --;
};
texture.addEventListener( 'dispose', onDispose );
}
//
textureData.version = texture.version;
}
/**
* Computes the size of the given texture and writes the result
* into the target vector. This vector is also returned by the
* method.
*
* If no texture data are available for the compute yet, the method
* returns default size values.
*
* @param {Texture} texture - The texture to compute the size for.
* @param {Vector3} target - The target vector.
* @return {Vector3} The target vector.
*/
getSize( texture, target = _size$3 ) {
let image = texture.images ? texture.images[ 0 ] : texture.image;
if ( image ) {
// unwrap when the data are nested in an additional `image` object
if ( image.image !== undefined ) image = image.image;
target.width = image.width || 1;
target.height = image.height || 1;
target.depth = texture.isCubeTexture ? 6 : ( image.depth || 1 );
} else {
target.width = target.height = target.depth = 1;
}
return target;
}
/**
* Computes the number of mipmap levels for the given texture.
*
* @param {Texture} texture - The texture.
* @param {Number} width - The texture's width.
* @param {Number} height - The texture's height.
* @return {Number} The number of mipmap levels.
*/
getMipLevels( texture, width, height ) {
let mipLevelCount;
if ( texture.isCompressedTexture ) {
// compressed textures ship their mip levels as data
if ( texture.mipmaps ) {
mipLevelCount = texture.mipmaps.length;
} else {
mipLevelCount = 1;
}
} else {
// full mip chain down to 1x1
mipLevelCount = Math.floor( Math.log2( Math.max( width, height ) ) ) + 1;
}
return mipLevelCount;
}
/**
* Returns `true` if the given texture requires mipmaps.
*
* @param {Texture} texture - The texture.
* @return {Boolean} Whether mipmaps are required or not.
*/
needsMipmaps( texture ) {
return this.isEnvironmentTexture( texture ) || texture.isCompressedTexture === true || texture.generateMipmaps;
}
/**
* Returns `true` if the given texture is an environment map, i.e. uses
* an equirectangular or cube mapping.
*
* @param {Texture} texture - The texture.
* @return {Boolean} Whether the given texture is an environment map or not.
*/
isEnvironmentTexture( texture ) {
const mapping = texture.mapping;
return ( mapping === EquirectangularReflectionMapping || mapping === EquirectangularRefractionMapping ) || ( mapping === CubeReflectionMapping || mapping === CubeRefractionMapping );
}
/**
* Frees internal resource when the given texture isn't
* required anymore.
*
* @param {Texture} texture - The texture to destroy.
*/
_destroyTexture( texture ) {
this.backend.destroySampler( texture );
this.backend.destroyTexture( texture );
this.delete( texture );
}
}
/**
* A four-component version of {@link Color} which is internally
* used by the renderer to represent a clear color with alpha as
* a single object.
*
* @private
* @augments Color
*/
class Color4 extends Color {

	/**
	 * Constructs a new four-component color.
	 * You can also pass a single THREE.Color, hex or
	 * string argument to this constructor.
	 *
	 * @param {Number|String} [r=1] - The red value.
	 * @param {Number} [g=1] - The green value.
	 * @param {Number} [b=1] - The blue value.
	 * @param {Number} [a=1] - The alpha value.
	 */
	constructor( r, g, b, a = 1 ) {

		super( r, g, b );

		this.a = a;

	}

	/**
	 * Sets the color components. Overwrites the default to honor alpha.
	 * You can also pass a single THREE.Color, hex or
	 * string argument to this method.
	 *
	 * @param {Number|String} r - The red value.
	 * @param {Number} g - The green value.
	 * @param {Number} b - The blue value.
	 * @param {Number} [a=1] - The alpha value.
	 * @return {Color4} A reference to this object.
	 */
	set( r, g, b, a = 1 ) {

		this.a = a;

		return super.set( r, g, b );

	}

	/**
	 * Copies the given color including alpha (when defined).
	 *
	 * @param {Color4} color - The color to copy.
	 * @return {Color4} A reference to this object.
	 */
	copy( color ) {

		const { a } = color;

		if ( a !== undefined ) this.a = a;

		return super.copy( color );

	}

	/**
	 * Returns a new color instance with copied components including alpha.
	 *
	 * @return {Color4} The cloned color.
	 */
	clone() {

		const { r, g, b, a } = this;

		return new this.constructor( r, g, b, a );

	}

}
/** @module ParameterNode **/
/**
* Special version of {@link PropertyNode} which is used for parameters.
*
* @augments PropertyNode
*/
class ParameterNode extends PropertyNode {

	static get type() {

		return 'ParameterNode';

	}

	/**
	 * Constructs a new parameter node.
	 *
	 * @param {String} nodeType - The type of the node.
	 * @param {String?} [name=null] - The name of the parameter in the shader.
	 */
	constructor( nodeType, name = null ) {

		super( nodeType, name );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isParameterNode = true;

	}

	/**
	 * Returns the uuid of this node as its hash.
	 *
	 * @return {String} The hash.
	 */
	getHash() {

		return this.uuid;

	}

	/**
	 * Parameters generate to their plain name in the shader.
	 *
	 * @return {String} The parameter name.
	 */
	generate() {

		return this.name;

	}

}
/**
* TSL function for creating a parameter node.
* The new node is wrapped in a node object proxy before it is returned.
*
* @function
* @param {String} type - The type of the node.
* @param {String?} name - The name of the parameter in the shader.
* @returns {ParameterNode}
*/
const parameter = ( type, name ) => nodeObject( new ParameterNode( type, name ) );
/** @module StackNode **/
/**
* Stack is a helper for Nodes that need to produce stack-based code instead of continuous flow.
* They are usually needed in cases like `If`, `Else`.
*
* @augments Node
*/
class StackNode extends Node {

	static get type() {

		return 'StackNode';

	}

	/**
	 * Constructs a new stack node.
	 *
	 * @param {StackNode?} [parent=null] - The parent stack node.
	 */
	constructor( parent = null ) {

		super();

		/**
		 * The nodes of this stack, built in insertion order.
		 *
		 * @type {Array<Node>}
		 */
		this.nodes = [];

		/**
		 * The optional output node.
		 *
		 * @type {Node?}
		 * @default null
		 */
		this.outputNode = null;

		/**
		 * The parent stack node.
		 *
		 * @type {StackNode}
		 * @default null
		 */
		this.parent = parent;

		/**
		 * The most recent conditional node, used to chain
		 * `ElseIf()`/`Else()` calls onto a preceding `If()`.
		 *
		 * @private
		 * @type {ConditionalNode}
		 * @default null
		 */
		this._currentCond = null;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isStackNode = true;

	}

	getNodeType( builder ) {

		const { outputNode } = this;

		return outputNode ? outputNode.getNodeType( builder ) : 'void';

	}

	getMemberType( builder, name ) {

		const { outputNode } = this;

		return outputNode ? outputNode.getMemberType( builder, name ) : 'void';

	}

	/**
	 * Adds a node to this stack.
	 *
	 * @param {Node} node - The node to add.
	 * @return {StackNode} A reference to this stack node.
	 */
	add( node ) {

		this.nodes.push( node );

		return this;

	}

	/**
	 * Represents an `if` statement in TSL.
	 *
	 * @param {Node} boolNode - Represents the condition.
	 * @param {Function} method - TSL code which is executed if the condition evaluates to `true`.
	 * @return {StackNode} A reference to this stack node.
	 */
	If( boolNode, method ) {

		this._currentCond = select( boolNode, new ShaderNode( method ) );

		return this.add( this._currentCond );

	}

	/**
	 * Represents an `elseif` statement in TSL.
	 *
	 * @param {Node} boolNode - Represents the condition.
	 * @param {Function} method - TSL code which is executed if the condition evaluates to `true`.
	 * @return {StackNode} A reference to this stack node.
	 */
	ElseIf( boolNode, method ) {

		const nextCond = select( boolNode, new ShaderNode( method ) );

		// chain onto the previous conditional and make this the new tail

		this._currentCond.elseNode = nextCond;
		this._currentCond = nextCond;

		return this;

	}

	/**
	 * Represents an `else` statement in TSL.
	 *
	 * @param {Function} method - TSL code which is executed in the `else` case.
	 * @return {StackNode} A reference to this stack node.
	 */
	Else( method ) {

		this._currentCond.elseNode = new ShaderNode( method );

		return this;

	}

	build( builder, ...params ) {

		const outerStack = getCurrentStack();

		setCurrentStack( this );

		for ( const childNode of this.nodes ) {

			childNode.build( builder, 'void' );

		}

		setCurrentStack( outerStack );

		return this.outputNode ? this.outputNode.build( builder, ...params ) : super.build( builder, ...params );

	}

	// Deprecated

	/**
	 * @function
	 * @deprecated since r168. Use {@link StackNode#Else} instead.
	 *
	 * @param {...any} params
	 * @returns {StackNode}
	 */
	else( ...params ) { // @deprecated, r168

		console.warn( 'TSL.StackNode: .else() has been renamed to .Else().' );

		return this.Else( ...params );

	}

	/**
	 * @deprecated since r168. Use {@link StackNode#ElseIf} instead.
	 *
	 * @param {...any} params
	 * @returns {StackNode}
	 */
	elseif( ...params ) { // @deprecated, r168

		console.warn( 'TSL.StackNode: .elseif() has been renamed to .ElseIf().' );

		return this.ElseIf( ...params );

	}

}
/**
* TSL function for creating a stack node.
* Stacks hold sequential, statement-based TSL code (e.g. `If`/`ElseIf`/`Else`).
*
* @function
* @param {StackNode?} [parent=null] - The parent stack node.
* @returns {StackNode}
*/
const stack = /*@__PURE__*/ nodeProxy( StackNode );
/** @module StructTypeNode **/
/**
* Generates a layout for struct members.
* This function takes an object representing struct members and returns an array of member layouts.
* Each member layout includes the member's name, type, and whether it is atomic.
*
* @param {Object.<string, string|Object>} members - An object where keys are member names and values are either types (as strings) or objects with type and atomic properties.
* @returns {Array.<{name: string, type: string, atomic: boolean}>} An array of member layouts.
*/
function getMembersLayout( members ) {

	const layout = [];

	for ( const [ name, value ] of Object.entries( members ) ) {

		if ( typeof value === 'string' ) {

			// shorthand notation: the value is the type, atomic defaults to false

			layout.push( { name, type: value, atomic: false } );

		} else {

			layout.push( { name, type: value.type, atomic: value.atomic || false } );

		}

	}

	return layout;

}
/**
* Represents a struct type node in the node-based system.
* This class is used to define and manage the layout and types of struct members.
* It extends the base Node class and provides methods to get the length of the struct,
* retrieve member types, and generate the struct type for a builder.
*
* @augments Node
*/
class StructTypeNode extends Node {

	static get type() {

		return 'StructTypeNode';

	}

	/**
	 * Creates an instance of StructTypeNode.
	 *
	 * @param {Object} membersLayout - The layout of the members for the struct.
	 * @param {string} [name=null] - The optional name of the struct.
	 */
	constructor( membersLayout, name = null ) {

		super( 'struct' );

		/**
		 * The normalized layout of the struct members.
		 *
		 * @type {Array.<{name: string, type: string, atomic: boolean}>}
		 */
		this.membersLayout = getMembersLayout( membersLayout );

		/**
		 * The name of the struct.
		 *
		 * @type {String}
		 * @default null
		 */
		this.name = name;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isStructLayoutNode = true;

	}

	/**
	 * Returns the length of the struct, computed as the sum of the
	 * lengths of all its members.
	 *
	 * @returns {Number} The length of the struct.
	 */
	getLength() {

		return this.membersLayout.reduce( ( length, member ) => length + getLengthFromType( member.type ), 0 );

	}

	getMemberType( builder, name ) {

		const member = this.membersLayout.find( ( layout ) => layout.name === name );

		return member !== undefined ? member.type : 'void';

	}

	getNodeType( builder ) {

		return builder.getStructTypeFromNode( this, this.membersLayout, this.name ).name;

	}

	generate( builder ) {

		return this.getNodeType( builder );

	}

}
/** @module StructNode **/
/**
* StructNode allows to create custom structures with multiple members.
* This can also be used to define structures in attribute and uniform data.
*
* ```js
* // Define a custom struct
* const BoundingBox = struct( { min: 'vec3', max: 'vec3' } );
*
* // Create a new instance of the struct
* const bb = BoundingBox( vec3( 0 ), vec3( 1 ) ); // style 1
* const bb = BoundingBox( { min: vec3( 0 ), max: vec3( 1 ) } ); // style 2
*
* // Access the struct members
* const min = bb.get( 'min' );
*
* // Assign a new value to a member
* min.assign( vec3() );
* ```
* @augments Node
*/
class StructNode extends Node {
static get type() {
return 'StructNode';
}
/**
* Constructs a new struct node.
*
* @param {StructTypeNode} structLayoutNode - The layout node describing the struct members.
* @param {Object<String, Node>?} values - The initial member values (may be null).
*/
constructor( structLayoutNode, values ) {
// NOTE(review): 'vec3' is only a placeholder node type — getNodeType() is
// overridden below and resolves the actual struct type via the layout node.
super( 'vec3' );
// The struct layout node that defines the member names and types.
this.structLayoutNode = structLayoutNode;
// The member values used to initialize the struct (may be null).
this.values = values;
// This flag can be used for type testing.
this.isStructNode = true;
}
// Delegates type resolution to the layout node.
getNodeType( builder ) {
return this.structLayoutNode.getNodeType( builder );
}
// Delegates member type lookup to the layout node.
getMemberType( builder, name ) {
return this.structLayoutNode.getMemberType( builder, name );
}
// Declares a shader variable of the struct type and emits an
// assignment line that initializes it from the member values.
generate( builder ) {
const nodeVar = builder.getVarFromNode( this );
const structType = nodeVar.type;
const propertyName = builder.getPropertyName( nodeVar );
builder.addLineFlowCode( `${ propertyName } = ${ builder.generateStruct( structType, this.structLayoutNode.membersLayout, this.values ) }`, this );
return nodeVar.name;
}
}
/**
* TSL function for creating a struct node factory.
*
* The returned function instantiates the struct either from a positional
* list of nodes (mapped onto the member names in declaration order) or
* from a single dictionary of member values.
*
* @function
* @param {Object} membersLayout - The layout of the struct members.
* @param {string} [name=null] - The name of the struct.
* @returns {Function} The struct function.
*/
const struct = ( membersLayout, name = null ) => {

	// one shared layout node per struct definition
	const structLayout = new StructTypeNode( membersLayout, name );

	const factory = ( ...params ) => {

		let values = null;

		if ( params.length > 0 ) {

			if ( params[ 0 ].isNode ) {

				// positional style: map each node onto the member names in order
				values = {};

				const names = Object.keys( membersLayout );

				params.forEach( ( param, i ) => {

					values[ names[ i ] ] = param;

				} );

			} else {

				// dictionary style: the first argument already maps member names to nodes
				values = params[ 0 ];

			}

		}

		return nodeObject( new StructNode( structLayout, values ) );

	};

	factory.layout = structLayout;
	factory.isStruct = true;

	return factory;

};
/** @module OutputStructNode **/
/**
* This node can be used to define multiple outputs in a shader program.
*
* @augments Node
*/
class OutputStructNode extends Node {

	static get type() {

		return 'OutputStructNode';

	}

	/**
	 * Constructs a new output struct node. The constructor can be invoked with an
	 * arbitrary number of nodes representing the members.
	 *
	 * @param {...Node} members - A parameter list of nodes.
	 */
	constructor( ...members ) {

		super();

		/**
		 * An array of nodes which defines the output.
		 *
		 * @type {Array<Node>}
		 */
		this.members = members;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isOutputStructNode = true;

	}

	/**
	 * Builds (once, cached in the node properties) a struct layout from the
	 * member types and returns the resulting struct type name.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		const properties = builder.getNodeProperties( this );

		if ( properties.membersLayout === undefined ) {

			// each member is exposed as a sequentially named field: m0, m1, ...
			const membersLayout = this.members.map( ( member, index ) => ( {
				name: 'm' + index,
				type: member.getNodeType( builder ),
				index
			} ) );

			properties.membersLayout = membersLayout;
			properties.structType = builder.getOutputStructTypeFromNode( this, membersLayout );

		}

		return properties.structType.name;

	}

	/**
	 * Emits one assignment line per member into the output struct.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The name of the output struct.
	 */
	generate( builder ) {

		const propertyName = builder.getOutputStructName();
		const structPrefix = propertyName !== '' ? propertyName + '.' : '';

		this.members.forEach( ( member, index ) => {

			const snippet = member.build( builder );

			builder.addLineFlowCode( `${ structPrefix }m${ index } = ${ snippet }`, this );

		} );

		return propertyName;

	}

}
/**
* TSL function for creating an output struct node.
*
* @function
* @param {...Node} members - A parameter list of nodes, one per output slot.
* @returns {OutputStructNode} The output struct node.
*/
const outputStruct = /*@__PURE__*/ nodeProxy( OutputStructNode );
/** @module MRTNode **/
/**
* Returns the MRT texture index for the given name.
*
* @param {Array<Texture>} textures - The textures of a MRT-configured render target.
* @param {String} name - The name of the MRT texture which index is requested.
* @return {Number} The texture index, or `-1` when no texture with that name exists.
*/
function getTextureIndex( textures, name ) {

	return textures.findIndex( ( texture ) => texture.name === name );

}
/**
* This node can be used to setup a MRT context for rendering. A typical MRT setup for
* post-processing is shown below:
* ```js
* const mrtNode = mrt( {
* output: output,
* normal: normalView
* } ) );
* ```
* The MRT output is defined as a dictionary.
*
* @augments OutputStructNode
*/
class MRTNode extends OutputStructNode {
static get type() {
return 'MRTNode';
}
/**
* Constructs a new output struct node.
*
* @param {Object<String, Node>} outputNodes - The MRT outputs.
*/
constructor( outputNodes ) {
super();
/**
* A dictionary representing the MRT outputs. The key
* is the name of the output, the value the node which produces
* the output result.
*
* @type {Object<String, Node>}
*/
this.outputNodes = outputNodes;
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isMRTNode = true;
}
/**
* Returns `true` if the MRT node has an output with the given name.
*
* @param {String} name - The name of the output.
* @return {Boolean} Whether the MRT node has an output for the given name or not.
*/
has( name ) {
return this.outputNodes[ name ] !== undefined;
}
/**
* Returns the output node for the given name.
*
* @param {String} name - The name of the output.
* @return {Node} The output node.
*/
get( name ) {
return this.outputNodes[ name ];
}
/**
* Merges the outputs of the given MRT node with the outputs of this node.
* Outputs of the given node win over outputs of this node on name clashes.
*
* @param {MRTNode} mrtNode - The MRT to merge.
* @return {MRTNode} A new MRT node with merged outputs.
*/
merge( mrtNode ) {
const outputs = { ...this.outputNodes, ...mrtNode.outputNodes };
return mrt( outputs );
}
// Maps each named output onto the texture slot of the same name in the
// currently bound render target, then defers to OutputStructNode.
setup( builder ) {
const outputNodes = this.outputNodes;
const mrt = builder.renderer.getRenderTarget();
const members = [];
const textures = mrt.textures;
for ( const name in outputNodes ) {
// NOTE(review): getTextureIndex() returns -1 for an unknown name, which
// would create a "-1" entry here — assumes output names always match the
// render target's texture names; confirm with callers.
const index = getTextureIndex( textures, name );
members[ index ] = vec4( outputNodes[ name ] );
}
this.members = members;
return super.setup( builder );
}
}
/**
* TSL function for creating a MRT node.
*
* @function
* @param {Object<String, Node>} outputNodes - The MRT outputs, keyed by render target texture name.
* @returns {MRTNode} The MRT node.
*/
const mrt = /*@__PURE__*/ nodeProxy( MRTNode );
/** @module Hash **/
/**
* Generates a hash value in the range `[0, 1)` from the given seed.
*
* @method
* @param {Node<float>} seed - The seed.
* @return {Node<float>} The hash value.
*/
const hash = /*@__PURE__*/ Fn( ( [ seed ] ) => {
// Taken from https://www.shadertoy.com/view/XlGcRh, originally from pcg-random.org
// PCG-style integer hash: LCG step followed by a variable-shift xorshift.
const state = seed.toUint().mul( 747796405 ).add( 2891336453 );
const word = state.shiftRight( state.shiftRight( 28 ).add( 4 ) ).bitXor( state ).mul( 277803737 );
const result = word.shiftRight( 22 ).bitXor( word );
return result.toFloat().mul( 1 / 2 ** 32 ); // Convert to range [0, 1)
} );
/** @module MathUtils **/
/**
* A function that remaps the `[0,1]` interval into the `[0,1]` interval.
* The corners are mapped to `0` and the center to `1`.
* Reference: {@link https://iquilezles.org/articles/functions/}.
*
* @method
* @param {Node<float>} x - The value to remap.
* @param {Node<float>} k - Allows to control the remapping functions shape by rising the parabola to a power `k`.
* @return {Node<float>} The remapped value.
*/
// (4x(1-x))^k — the inner term peaks at 1 for x = 0.5 and is 0 at both corners.
const parabola = ( x, k ) => pow( mul( 4.0, x.mul( sub( 1.0, x ) ) ), k );
/**
* A function that remaps the `[0,1]` interval into the `[0,1]` interval.
* Expands the sides and compresses the center, and keeps `0.5` mapped to `0.5`.
* Reference: {@link https://iquilezles.org/articles/functions/}.
*
* Note: the branch must be expressed with `select()` so the comparison is
* evaluated in the shader. The previous implementation used a JavaScript
* ternary on the node returned by `lessThan()`; node objects are always
* truthy, so the upper-half branch was unreachable.
*
* @method
* @param {Node<float>} x - The value to remap.
* @param {Node<float>} k - `k=1` is the identity curve,`k<1` produces the classic `gain()` shape, and `k>1` produces "s" shaped curves.
* @return {Node<float>} The remapped value.
*/
const gain = ( x, k ) => x.lessThan( 0.5 ).select(
	// lower half: scaled parabola rising from 0 to 0.5
	parabola( x.mul( 2.0 ), k ).div( 2.0 ),
	// upper half: mirrored parabola rising from 0.5 to 1
	sub( 1.0, parabola( mul( sub( 1.0, x ), 2.0 ), k ).div( 2.0 ) )
);
/**
* A function that remaps the `[0,1]` interval into the `[0,1]` interval.
* A generalization of the `parabola()`. Keeps the corners mapped to 0 but allows the control of the shape on either side of the curve.
* Reference: {@link https://iquilezles.org/articles/functions/}.
*
* @method
* @param {Node<float>} x - The value to remap.
* @param {Node<float>} a - First control parameter (shapes the left side).
* @param {Node<float>} b - Second control parameter (shapes the right side).
* @return {Node<float>} The remapped value.
*/
// (x^a / (x^a + (1-x)^b))^(1/a)
const pcurve = ( x, a, b ) => pow( div( pow( x, a ), add( pow( x, a ), pow( sub( 1.0, x ), b ) ) ), 1.0 / a );
/**
* A phase shifted sinus curve that starts at zero and ends at zero, with bouncing behavior.
* Reference: {@link https://iquilezles.org/articles/functions/}.
*
* @method
* @param {Node<float>} x - The value to compute the sin for.
* @param {Node<float>} k - Controls the amount of bounces.
* @return {Node<float>} The result value.
*/
// Normalized sinc with argument t = PI * (k*x - 1): sin(t) / t.
const sinc = ( x, k ) => sin( PI.mul( k.mul( x ).sub( 1.0 ) ) ).div( PI.mul( k.mul( x ).sub( 1.0 ) ) );
// https://github.com/cabbibo/glsl-tri-noise-3d
/** @module TriNoise3D **/
/**
* Triangle wave helper: |fract(x) - 0.5|, yielding a value in `[0, 0.5]`.
*/
const tri = /*@__PURE__*/ Fn( ( [ x ] ) => {
return x.fract().sub( .5 ).abs();
} ).setLayout( {
name: 'tri',
type: 'float',
inputs: [
{ name: 'x', type: 'float' }
]
} );
/**
* Component-wise cross-fed triangle waves used as the gradient term of the noise.
*/
const tri3 = /*@__PURE__*/ Fn( ( [ p ] ) => {
return vec3( tri( p.z.add( tri( p.y.mul( 1. ) ) ) ), tri( p.z.add( tri( p.x.mul( 1. ) ) ) ), tri( p.y.add( tri( p.x.mul( 1. ) ) ) ) );
} ).setLayout( {
name: 'tri3',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec3' }
]
} );
/**
* Generates a noise value from the given position, speed and time parameters.
*
* @method
* @param {Node<vec3>} position - The position.
* @param {Node<float>} speed - The speed.
* @param {Node<float>} time - The time.
* @return {Node<float>} The generated noise.
*/
const triNoise3D = /*@__PURE__*/ Fn( ( [ position, speed, time ] ) => {
const p = vec3( position ).toVar();
const z = float( 1.4 ).toVar(); // amplitude divisor, grows each octave
const rz = float( 0.0 ).toVar(); // accumulated result
const bp = vec3( p ).toVar(); // base position for the gradient term
// Four octaves (0..3 inclusive): each iteration distorts the sample
// position, shrinks the contribution, and accumulates a triangle wave.
Loop( { start: float( 0.0 ), end: float( 3.0 ), type: 'float', condition: '<=' }, () => {
const dg = vec3( tri3( bp.mul( 2.0 ) ) ).toVar();
p.addAssign( dg.add( time.mul( float( 0.1 ).mul( speed ) ) ) );
bp.mulAssign( 1.8 );
z.mulAssign( 1.5 );
p.mulAssign( 1.2 );
const t = float( tri( p.z.add( tri( p.x.add( tri( p.y ) ) ) ) ) ).toVar();
rz.addAssign( t.div( z ) );
bp.addAssign( 0.14 );
} );
return rz;
} ).setLayout( {
name: 'triNoise3D',
type: 'float',
inputs: [
{ name: 'position', type: 'vec3' },
{ name: 'speed', type: 'float' },
{ name: 'time', type: 'float' }
]
} );
/** @module FunctionOverloadingNode **/
/**
* This class allows to define multiple overloaded versions
* of the same function. Depending on the parameters of the function
* call, the node picks the best-fit overloaded version.
*
* @augments Node
*/
class FunctionOverloadingNode extends Node {
static get type() {
return 'FunctionOverloadingNode';
}
/**
* Constructs a new function overloading node.
*
* @param {Array<Function>} functionNodes - Array of `Fn` function definitions.
* @param {...Node} parametersNodes - A list of parameter nodes.
*/
constructor( functionNodes = [], ...parametersNodes ) {
super();
/**
* Array of `Fn` function definitions.
*
* @type {Array<Function>}
*/
this.functionNodes = functionNodes;
/**
* A list of parameter nodes.
*
* @type {Array<Node>}
*/
this.parametersNodes = parametersNodes;
/**
* The selected overloaded function call.
*
* @private
* @type {ShaderCallNodeInternal}
*/
this._candidateFnCall = null;
/**
* This node is marked as global.
*
* @type {Boolean}
* @default true
*/
this.global = true;
}
/**
* This method is overwritten since the node type is inferred from
* the return type declared by the first overload's layout.
*
* @return {String} The node type.
*/
getNodeType() {
return this.functionNodes[ 0 ].shaderNode.layout.type;
}
/**
* Selects (once, cached) the overload whose input types best match the
* parameter types and returns the resulting function call node.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal} The call to the best-fit overload.
*/
setup( builder ) {
const params = this.parametersNodes;
let candidateFnCall = this._candidateFnCall;
if ( candidateFnCall === null ) {
let candidateFn = null;
let candidateScore = - 1;
// Score each overload with a matching arity by counting exact type matches.
for ( const functionNode of this.functionNodes ) {
const shaderNode = functionNode.shaderNode;
const layout = shaderNode.layout;
if ( layout === null ) {
throw new Error( 'FunctionOverloadingNode: FunctionNode must be a layout.' );
}
const inputs = layout.inputs;
if ( params.length === inputs.length ) {
let score = 0;
for ( let i = 0; i < params.length; i ++ ) {
const param = params[ i ];
const input = inputs[ i ];
if ( param.getNodeType( builder ) === input.type ) {
score ++;
} else {
// NOTE(review): a mismatch resets the score but scoring continues,
// so matches after a mismatch still count — confirm this is intended
// rather than an early break.
score = 0;
}
}
if ( score > candidateScore ) {
candidateFn = functionNode;
candidateScore = score;
}
}
}
this._candidateFnCall = candidateFnCall = candidateFn( ...params );
}
return candidateFnCall;
}
}
// Base proxy used by overloadingFn() to construct the overloading node.
const overloadingBaseFn = /*@__PURE__*/ nodeProxy( FunctionOverloadingNode );
/**
* TSL function for creating a function overloading node.
* Returns a callable that forwards its arguments as the parameter nodes.
*
* @function
* @param {Array<Function>} functionNodes - Array of `Fn` function definitions.
* @returns {FunctionOverloadingNode}
*/
const overloadingFn = ( functionNodes ) => ( ...params ) => overloadingBaseFn( functionNodes, ...params );
/** @module Timer **/
/**
* Represents the elapsed time in seconds. Updated once per render
* via the shared render uniform group.
*
* @type {UniformNode<float>}
*/
const time = /*@__PURE__*/ uniform( 0 ).setGroup( renderGroup ).onRenderUpdate( ( frame ) => frame.time );
/**
* Represents the delta time in seconds.
*
* @type {UniformNode<float>}
*/
const deltaTime = /*@__PURE__*/ uniform( 0 ).setGroup( renderGroup ).onRenderUpdate( ( frame ) => frame.deltaTime );
/**
* Represents the current frame ID.
*
* @type {UniformNode<uint>}
*/
const frameId = /*@__PURE__*/ uniform( 0, 'uint' ).setGroup( renderGroup ).onRenderUpdate( ( frame ) => frame.frameId );
// Deprecated
/**
* @function
* @deprecated since r170. Use {@link time} instead.
*
* @param {Number} [timeScale=1] - The time scale.
* @returns {UniformNode<float>}
*/
const timerLocal = ( timeScale = 1 ) => { // @deprecated, r170
console.warn( 'TSL: timerLocal() is deprecated. Use "time" instead.' );
return time.mul( timeScale );
};
/**
* @function
* @deprecated since r170. Use {@link time} instead.
*
* @param {Number} [timeScale=1] - The time scale.
* @returns {UniformNode<float>}
*/
const timerGlobal = ( timeScale = 1 ) => { // @deprecated, r170
console.warn( 'TSL: timerGlobal() is deprecated. Use "time" instead.' );
return time.mul( timeScale );
};
/**
* @function
* @deprecated since r170. Use {@link deltaTime} instead.
*
* @param {Number} [timeScale=1] - The time scale.
* @returns {UniformNode<float>}
*/
const timerDelta = ( timeScale = 1 ) => { // @deprecated, r170
console.warn( 'TSL: timerDelta() is deprecated. Use "deltaTime" instead.' );
return deltaTime.mul( timeScale );
};
/** @module Oscillators **/
/**
* Generates a sine wave oscillation based on a timer.
*
* @method
* @param {Node<float>} t - The timer to generate the oscillation with.
* @return {Node<float>} The oscillation node.
*/
// The 0.75 phase offset makes the wave start at 0: sin(2π(t+0.75))*0.5+0.5 = (1-cos(2πt))/2.
const oscSine = ( t = time ) => t.add( 0.75 ).mul( Math.PI * 2 ).sin().mul( 0.5 ).add( 0.5 );
/**
* Generates a square wave oscillation based on a timer.
*
* @method
* @param {Node<float>} t - The timer to generate the oscillation with.
* @return {Node<float>} The oscillation node.
*/
const oscSquare = ( t = time ) => t.fract().round();
/**
* Generates a triangle wave oscillation based on a timer.
*
* @method
* @param {Node<float>} t - The timer to generate the oscillation with.
* @return {Node<float>} The oscillation node.
*/
const oscTriangle = ( t = time ) => t.add( 0.5 ).fract().mul( 2 ).sub( 1 ).abs();
/**
* Generates a sawtooth wave oscillation based on a timer.
*
* @method
* @param {Node<float>} t - The timer to generate the oscillation with.
* @return {Node<float>} The oscillation node.
*/
const oscSawtooth = ( t = time ) => t.fract();
/** @module UVUtils **/
/**
* Rotates the given uv coordinates around a center point.
*
* @method
* @param {Node<vec2>} uv - The uv coordinates.
* @param {Node<float>} rotation - The rotation defined in radians.
* @param {Node<vec2>} [center=vec2(0.5)] - The center of rotation.
* @return {Node<vec2>} The rotated uv coordinates.
*/
const rotateUV = /*@__PURE__*/ Fn( ( [ uv, rotation, center = vec2( 0.5 ) ] ) => {
// translate to the origin, rotate, translate back
return rotate( uv.sub( center ), rotation ).add( center );
} );
/**
* Applies a spherical warping effect to the given uv coordinates.
*
* @method
* @param {Node<vec2>} uv - The uv coordinates.
* @param {Node<float>} strength - The strength of the effect.
* @param {Node<vec2>} [center=vec2(0.5)] - The center point of the warp.
* @return {Node<vec2>} The updated uv coordinates.
*/
const spherizeUV = /*@__PURE__*/ Fn( ( [ uv, strength, center = vec2( 0.5 ) ] ) => {

	// vector from the warp center to the current uv
	const offset = uv.sub( center );

	// squared distance, raised to the 4th power for a smooth falloff
	const distSq = offset.dot( offset );
	const falloff = distSq.mul( distSq );

	// push the uv outwards along the offset, scaled by falloff and strength
	return uv.add( offset.mul( falloff.mul( strength ) ) );

} );
/** @module SpriteUtils **/
/**
* This can be used to achieve a billboarding behavior for flat meshes. That means they are
* oriented always towards the camera.
*
* ```js
* material.vertexNode = billboarding();
* ```
*
* @method
* @param {Object} config - The configuration object.
* @param {Node<vec3>?} [config.position=null] - Can be used to define the vertex positions in world space.
* @param {Boolean} [config.horizontal=true] - Whether to follow the camera rotation horizontally or not.
* @param {Boolean} [config.vertical=false] - Whether to follow the camera rotation vertically or not.
* @return {Node<vec3>} The updated vertex position in clip space.
*/
const billboarding = /*@__PURE__*/ Fn( ( { position = null, horizontal = true, vertical = false } ) => {
let worldMatrix;
if ( position !== null ) {
// override the translation column of the world matrix with the given position
worldMatrix = modelWorldMatrix.toVar();
worldMatrix[ 3 ][ 0 ] = position.x;
worldMatrix[ 3 ][ 1 ] = position.y;
worldMatrix[ 3 ][ 2 ] = position.z;
} else {
worldMatrix = modelWorldMatrix;
}
const modelViewMatrix = cameraViewMatrix.mul( worldMatrix );
if ( defined( horizontal ) ) {
// replace the x basis vector with a camera-aligned axis, preserving the object's x scale
modelViewMatrix[ 0 ][ 0 ] = modelWorldMatrix[ 0 ].length();
modelViewMatrix[ 0 ][ 1 ] = 0;
modelViewMatrix[ 0 ][ 2 ] = 0;
}
if ( defined( vertical ) ) {
// replace the y basis vector with a camera-aligned axis, preserving the object's y scale
modelViewMatrix[ 1 ][ 0 ] = 0;
modelViewMatrix[ 1 ][ 1 ] = modelWorldMatrix[ 1 ].length();
modelViewMatrix[ 1 ][ 2 ] = 0;
}
// the z basis vector always faces the camera
modelViewMatrix[ 2 ][ 0 ] = 0;
modelViewMatrix[ 2 ][ 1 ] = 0;
modelViewMatrix[ 2 ][ 2 ] = 1;
return cameraProjectionMatrix.mul( modelViewMatrix ).mul( positionLocal );
} );
/** @module ViewportUtils **/
/**
* A special version of a screen uv function that involves a depth comparison
* when computing the final uvs. The function mitigates visual errors when
* using viewport texture nodes for refraction purposes. Without this function
* objects in front of a refractive surface might appear on the refractive surface
* which is incorrect.
*
* @method
* @param {Node<vec2>?} uv - Optional uv coordinates. By default `screenUV` is used.
* @return {Node<vec2>} The update uv coordinates.
*/
const viewportSafeUV = /*@__PURE__*/ Fn( ( [ uv = null ] ) => {
const depth = linearDepth();
// depth at the (possibly distorted) uv minus the fragment's own depth
const depthDiff = linearDepth( viewportDepthTexture( uv ) ).sub( depth );
// if the sampled point is closer than the fragment, fall back to the plain screen uv
const finalUV = depthDiff.lessThan( 0 ).select( screenUV, uv );
return finalUV;
} );
/** @module SpriteSheetUVNode **/
/**
* Can be used to compute texture coordinates for animated sprite sheets.
*
* ```js
* const uvNode = spritesheetUV( vec2( 6, 6 ), uv(), time.mul( animationSpeed ) );
*
* material.colorNode = texture( spriteSheet, uvNode );
* ```
*
* @augments Node
*/
class SpriteSheetUVNode extends Node {

	static get type() {

		return 'SpriteSheetUVNode';

	}

	/**
	 * Constructs a new sprite sheet uv node.
	 *
	 * @param {Node<vec2>} countNode - The node that defines the number of sprites in the x and y direction (e.g 6x6).
	 * @param {Node<vec2>} [uvNode=uv()] - The uv node.
	 * @param {Node<float>} [frameNode=float()] - The node that defines the current frame/sprite.
	 */
	constructor( countNode, uvNode = uv(), frameNode = float( 0 ) ) {

		super( 'vec2' );

		/**
		 * The node that defines the number of sprites in the x and y direction (e.g 6x6).
		 *
		 * @type {Node<vec2>}
		 */
		this.countNode = countNode;

		/**
		 * The uv node.
		 *
		 * @type {Node<vec2>}
		 */
		this.uvNode = uvNode;

		/**
		 * The node that defines the current frame/sprite.
		 *
		 * @type {Node<float>}
		 */
		this.frameNode = frameNode;

	}

	setup() {

		const { frameNode, uvNode, countNode } = this;
		const { width, height } = countNode;

		// wrap the frame index so the animation loops over all sprites
		const spriteIndex = frameNode.mod( width.mul( height ) ).floor();

		// 2D cell coordinates of the current sprite (rows counted from the bottom)
		const cellX = spriteIndex.mod( width );
		const cellY = height.sub( spriteIndex.add( 1 ).div( width ).ceil() );

		// scale the uvs into a single cell and shift them to the current one
		const cellSize = countNode.reciprocal();

		return uvNode.add( vec2( cellX, cellY ) ).mul( cellSize );

	}

}
/**
* TSL function for creating a sprite sheet uv node.
*
* @function
* @param {Node<vec2>} countNode - The node that defines the number of sprites in the x and y direction (e.g 6x6).
* @param {Node<vec2>} [uvNode=uv()] - The uv node.
* @param {Node<float>} [frameNode=float()] - The node that defines the current frame/sprite.
* @returns {SpriteSheetUVNode} The sprite sheet uv node.
*/
const spritesheetUV = /*@__PURE__*/ nodeProxy( SpriteSheetUVNode );
/** @module TriplanarTexturesNode **/
/**
* Can be used for triplanar texture mapping.
*
* ```js
* material.colorNode = triplanarTexture( texture( diffuseMap ) );
* ```
*
* @augments Node
*/
class TriplanarTexturesNode extends Node {
static get type() {
return 'TriplanarTexturesNode';
}
/**
* Constructs a new triplanar textures node.
*
* @param {Node} textureXNode - First texture node.
* @param {Node?} [textureYNode=null] - Second texture node. When not set, the shader will sample from `textureXNode` instead.
* @param {Node?} [textureZNode=null] - Third texture node. When not set, the shader will sample from `textureXNode` instead.
* @param {Node<float>?} [scaleNode=float(1)] - The scale node.
* @param {Node<vec3>?} [positionNode=positionLocal] - Vertex positions in local space.
* @param {Node<vec3>?} [normalNode=normalLocal] - Normals in local space.
*/
constructor( textureXNode, textureYNode = null, textureZNode = null, scaleNode = float( 1 ), positionNode = positionLocal, normalNode = normalLocal ) {
super( 'vec4' );
/**
* First texture node.
*
* @type {Node}
*/
this.textureXNode = textureXNode;
/**
* Second texture node. When not set, the shader will sample from `textureXNode` instead.
*
* @type {Node}
* @default null
*/
this.textureYNode = textureYNode;
/**
* Third texture node. When not set, the shader will sample from `textureXNode` instead.
*
* @type {Node}
* @default null
*/
this.textureZNode = textureZNode;
/**
* The scale node.
*
* @type {Node<float>}
* @default float(1)
*/
this.scaleNode = scaleNode;
/**
* Vertex positions in local space.
*
* @type {Node<vec3>}
* @default positionLocal
*/
this.positionNode = positionNode;
/**
* Normals in local space.
*
* @type {Node<vec3>}
* @default normalLocal
*/
this.normalNode = normalNode;
}
setup() {
const { textureXNode, textureYNode, textureZNode, scaleNode, positionNode, normalNode } = this;
// Ref: https://github.com/keijiro/StandardTriplanar
// Blending factor of triplanar mapping; normalized so the three weights sum to 1
let bf = normalNode.abs().normalize();
bf = bf.div( bf.dot( vec3( 1.0 ) ) );
// Triplanar mapping: project the position onto the three axis-aligned planes
const tx = positionNode.yz.mul( scaleNode );
const ty = positionNode.zx.mul( scaleNode );
const tz = positionNode.xy.mul( scaleNode );
// Base color: sample each plane (falling back to the first texture) and blend
const textureX = textureXNode.value;
const textureY = textureYNode !== null ? textureYNode.value : textureX;
const textureZ = textureZNode !== null ? textureZNode.value : textureX;
const cx = texture( textureX, tx ).mul( bf.x );
const cy = texture( textureY, ty ).mul( bf.y );
const cz = texture( textureZ, tz ).mul( bf.z );
return add( cx, cy, cz );
}
}
/**
* TSL function for creating a triplanar textures node.
*
* @function
* @param {Node} textureXNode - First texture node.
* @param {Node?} [textureYNode=null] - Second texture node. When not set, the shader will sample from `textureXNode` instead.
* @param {Node?} [textureZNode=null] - Third texture node. When not set, the shader will sample from `textureXNode` instead.
* @param {Node<float>?} [scaleNode=float(1)] - The scale node.
* @param {Node<vec3>?} [positionNode=positionLocal] - Vertex positions in local space.
* @param {Node<vec3>?} [normalNode=normalLocal] - Normals in local space.
* @returns {TriplanarTexturesNode}
*/
const triplanarTextures = /*@__PURE__*/ nodeProxy( TriplanarTexturesNode );
/**
* Convenience alias of {@link triplanarTextures}.
*
* @function
* @param {Node} textureXNode - First texture node.
* @param {Node?} [textureYNode=null] - Second texture node. When not set, the shader will sample from `textureXNode` instead.
* @param {Node?} [textureZNode=null] - Third texture node. When not set, the shader will sample from `textureXNode` instead.
* @param {Node<float>?} [scaleNode=float(1)] - The scale node.
* @param {Node<vec3>?} [positionNode=positionLocal] - Vertex positions in local space.
* @param {Node<vec3>?} [normalNode=normalLocal] - Normals in local space.
* @returns {TriplanarTexturesNode}
*/
const triplanarTexture = ( ...params ) => triplanarTextures( ...params );
/** @module ReflectorNode **/
// Module-scope scratch objects reused across reflector updates to avoid
// per-frame allocations.
const _reflectorPlane = new Plane();
const _normal = new Vector3();
const _reflectorWorldPosition = new Vector3();
const _cameraWorldPosition = new Vector3();
const _rotationMatrix = new Matrix4();
const _lookAtPosition = new Vector3( 0, 0, - 1 );
// NOTE(review): unlike its siblings this scratch value has no underscore prefix.
const clipPlane = new Vector4();
const _view = new Vector3();
const _target = new Vector3();
const _q = new Vector4();
const _size$2 = new Vector2();
// Fallback render target (with depth) and flipped screen uv used as the
// texture node's defaults before a reflector has rendered.
const _defaultRT = new RenderTarget();
const _defaultUV = screenUV.flipX();
_defaultRT.depthTexture = new DepthTexture( 1, 1 );
// Guards against reflectors recursively rendering themselves when bounces are disabled.
let _inReflector = false;
/**
* This node can be used to implement mirror-like flat reflective surfaces.
*
* ```js
* const groundReflector = reflector();
* material.colorNode = groundReflector;
*
* const plane = new Mesh( geometry, material );
* plane.add( groundReflector.target );
* ```
*
* @augments module:TextureNode~TextureNode
*/
class ReflectorNode extends TextureNode {
static get type() {
return 'ReflectorNode';
}
/**
* Constructs a new reflector node.
*
* @param {Object} [parameters={}] - An object holding configuration parameters.
* @param {Object3D} [parameters.target=new Object3D()] - The 3D object the reflector is linked to.
* @param {Number} [parameters.resolution=1] - The resolution scale.
* @param {Boolean} [parameters.generateMipmaps=false] - Whether mipmaps should be generated or not.
* @param {Boolean} [parameters.bounces=true] - Whether reflectors can render other reflector nodes or not.
* @param {Boolean} [parameters.depth=false] - Whether depth data should be generated or not.
* @param {TextureNode} [parameters.defaultTexture] - The default texture node.
* @param {ReflectorBaseNode} [parameters.reflector] - The reflector base node.
*/
constructor( parameters = {} ) {
super( parameters.defaultTexture || _defaultRT.texture, _defaultUV );
/**
* A reference to the internal reflector base node which holds the actual implementation.
*
* @private
* @type {ReflectorBaseNode?}
* @default null
*/
this._reflectorBaseNode = parameters.reflector || new ReflectorBaseNode( this, parameters );
/**
* A reference to the internal depth node.
*
* @private
* @type {Node?}
* @default null
*/
this._depthNode = null;
this.setUpdateMatrix( false );
}
/**
* A reference to the internal reflector node.
*
* @type {ReflectorBaseNode}
*/
get reflector() {
return this._reflectorBaseNode;
}
/**
* A reference to 3D object the reflector is linked to.
*
* @type {Object3D}
*/
get target() {
return this._reflectorBaseNode.target;
}
/**
* Returns a node representing the mirror's depth. That can be used
* to implement more advanced reflection effects like distance attenuation.
*
* @return {Node} The depth node.
*/
getDepthNode() {
if ( this._depthNode === null ) {
if ( this._reflectorBaseNode.depth !== true ) {
throw new Error( 'THREE.ReflectorNode: Depth node can only be requested when the reflector is created with { depth: true }. ' );
}
// the depth node shares this reflector's base node but samples the depth texture
this._depthNode = nodeObject( new ReflectorNode( {
defaultTexture: _defaultRT.depthTexture,
reflector: this._reflectorBaseNode
} ) );
}
return this._depthNode;
}
/**
* Builds the internal reflector before the texture setup.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node} The setup result.
*/
setup( builder ) {
// ignore if used in post-processing
if ( ! builder.object.isQuadMesh ) this._reflectorBaseNode.build( builder );
return super.setup( builder );
}
/**
* Clones this node, sharing the same reflector base node.
*
* @return {ReflectorNode} The cloned node.
*/
clone() {
// NOTE(review): `this.reflectorNode` is not a defined property, so the
// constructor receives undefined and falls back to its defaults — confirm
// whether `this._reflectorBaseNode` (or no argument) was intended.
const texture = new this.constructor( this.reflectorNode );
texture._reflectorBaseNode = this._reflectorBaseNode;
return texture;
}
}
/**
* Holds the actual implementation of the reflector.
*
* TODO: Explain why `ReflectorBaseNode`. Originally the entire logic was implemented
* in `ReflectorNode`, see #29619.
*
* @private
* @augments Node
*/
class ReflectorBaseNode extends Node {
static get type() {
return 'ReflectorBaseNode';
}
/**
* Constructs a new reflector base node.
*
* @param {TextureNode} textureNode - Represents the rendered reflections as a texture node.
* @param {Object} [parameters={}] - An object holding configuration parameters.
* @param {Object3D} [parameters.target=new Object3D()] - The 3D object the reflector is linked to.
* @param {Number} [parameters.resolution=1] - The resolution scale.
* @param {Boolean} [parameters.generateMipmaps=false] - Whether mipmaps should be generated or not.
* @param {Boolean} [parameters.bounces=true] - Whether reflectors can render other reflector nodes or not.
* @param {Boolean} [parameters.depth=false] - Whether depth data should be generated or not.
*/
constructor( textureNode, parameters = {} ) {
super();
const {
target = new Object3D(),
resolution = 1,
generateMipmaps = false,
bounces = true,
depth = false
} = parameters;
/**
* Represents the rendered reflections as a texture node.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* The 3D object the reflector is linked to.
*
* @type {Object3D}
* @default {new Object3D()}
*/
this.target = target;
/**
* The resolution scale.
*
* @type {Number}
* @default {1}
*/
this.resolution = resolution;
/**
* Whether mipmaps should be generated or not.
*
* @type {Boolean}
* @default {false}
*/
this.generateMipmaps = generateMipmaps;
/**
* Whether reflectors can render other reflector nodes or not.
*
* @type {Boolean}
* @default {true}
*/
this.bounces = bounces;
/**
* Whether depth data should be generated or not.
*
* @type {Boolean}
* @default {false}
*/
this.depth = depth;
/**
* The `updateBeforeType` is set to `NodeUpdateType.RENDER` when {@link ReflectorBaseNode#bounces}
* is `true`. Otherwise it's `NodeUpdateType.FRAME`.
*
* @type {String}
* @default 'render'
*/
this.updateBeforeType = bounces ? NodeUpdateType.RENDER : NodeUpdateType.FRAME;
/**
* Weak map for managing virtual cameras, keyed by the scene camera.
*
* @type {WeakMap<Camera, Camera>}
*/
this.virtualCameras = new WeakMap();
/**
* Weak map for managing render targets, keyed by the virtual camera.
*
* @type {WeakMap<Camera, RenderTarget>}
*/
this.renderTargets = new WeakMap();
}
/**
* Updates the resolution of the internal render target.
*
* @private
* @param {RenderTarget} renderTarget - The render target to resize.
* @param {Renderer} renderer - The renderer that is used to determine the new size.
*/
_updateResolution( renderTarget, renderer ) {
const resolution = this.resolution;
// size the target relative to the current drawing buffer, scaled by the resolution factor
renderer.getDrawingBufferSize( _size$2 );
renderTarget.setSize( Math.round( _size$2.width * resolution ), Math.round( _size$2.height * resolution ) );
}
/**
 * Setups the node. Resizes the default render target to the renderer's
 * current drawing buffer size before delegating to the parent implementation.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {Node} The result of the parent class' `setup()`.
 */
setup( builder ) {
this._updateResolution( _defaultRT, builder.renderer );
return super.setup( builder );
}
/**
* Returns a virtual camera for the given camera. The virtual camera is used to
* render the scene from the reflector's view so correct reflections can be produced.
*
* @param {Camera} camera - The scene's camera.
* @return {Camera} The corresponding virtual camera.
*/
getVirtualCamera( camera ) {
let virtualCamera = this.virtualCameras.get( camera );
if ( virtualCamera === undefined ) {
virtualCamera = camera.clone();
this.virtualCameras.set( camera, virtualCamera );
}
return virtualCamera;
}
/**
* Returns a render target for the given camera. The reflections are rendered
* into this render target.
*
* @param {Camera} camera - The scene's camera.
* @return {RenderTarget} The render target.
*/
getRenderTarget( camera ) {
let renderTarget = this.renderTargets.get( camera );
if ( renderTarget === undefined ) {
renderTarget = new RenderTarget( 0, 0, { type: HalfFloatType } );
if ( this.generateMipmaps === true ) {
renderTarget.texture.minFilter = LinearMipMapLinearFilter;
renderTarget.texture.generateMipmaps = true;
}
if ( this.depth === true ) {
renderTarget.depthTexture = new DepthTexture();
}
this.renderTargets.set( camera, renderTarget );
}
return renderTarget;
}
updateBefore( frame ) {
if ( this.bounces === false && _inReflector ) return false;
_inReflector = true;
const { scene, camera, renderer, material } = frame;
const { target } = this;
const virtualCamera = this.getVirtualCamera( camera );
const renderTarget = this.getRenderTarget( virtualCamera );
renderer.getDrawingBufferSize( _size$2 );
this._updateResolution( renderTarget, renderer );
//
_reflectorWorldPosition.setFromMatrixPosition( target.matrixWorld );
_cameraWorldPosition.setFromMatrixPosition( camera.matrixWorld );
_rotationMatrix.extractRotation( target.matrixWorld );
_normal.set( 0, 0, 1 );
_normal.applyMatrix4( _rotationMatrix );
_view.subVectors( _reflectorWorldPosition, _cameraWorldPosition );
// Avoid rendering when reflector is facing away
if ( _view.dot( _normal ) > 0 ) return;
_view.reflect( _normal ).negate();
_view.add( _reflectorWorldPosition );
_rotationMatrix.extractRotation( camera.matrixWorld );
_lookAtPosition.set( 0, 0, - 1 );
_lookAtPosition.applyMatrix4( _rotationMatrix );
_lookAtPosition.add( _cameraWorldPosition );
_target.subVectors( _reflectorWorldPosition, _lookAtPosition );
_target.reflect( _normal ).negate();
_target.add( _reflectorWorldPosition );
//
virtualCamera.coordinateSystem = camera.coordinateSystem;
virtualCamera.position.copy( _view );
virtualCamera.up.set( 0, 1, 0 );
virtualCamera.up.applyMatrix4( _rotationMatrix );
virtualCamera.up.reflect( _normal );
virtualCamera.lookAt( _target );
virtualCamera.near = camera.near;
virtualCamera.far = camera.far;
virtualCamera.updateMatrixWorld();
virtualCamera.projectionMatrix.copy( camera.projectionMatrix );
// Now update projection matrix with new clip plane, implementing code from: http://www.terathon.com/code/oblique.html
// Paper explaining this technique: http://www.terathon.com/lengyel/Lengyel-Oblique.pdf
_reflectorPlane.setFromNormalAndCoplanarPoint( _normal, _reflectorWorldPosition );
_reflectorPlane.applyMatrix4( virtualCamera.matrixWorldInverse );
clipPlane.set( _reflectorPlane.normal.x, _reflectorPlane.normal.y, _reflectorPlane.normal.z, _reflectorPlane.constant );
const projectionMatrix = virtualCamera.projectionMatrix;
_q.x = ( Math.sign( clipPlane.x ) + projectionMatrix.elements[ 8 ] ) / projectionMatrix.elements[ 0 ];
_q.y = ( Math.sign( clipPlane.y ) + projectionMatrix.elements[ 9 ] ) / projectionMatrix.elements[ 5 ];
_q.z = - 1.0;
_q.w = ( 1.0 + projectionMatrix.elements[ 10 ] ) / projectionMatrix.elements[ 14 ];
// Calculate the scaled plane vector
clipPlane.multiplyScalar( 1.0 / clipPlane.dot( _q ) );
const clipBias = 0;
// Replacing the third row of the projection matrix
projectionMatrix.elements[ 2 ] = clipPlane.x;
projectionMatrix.elements[ 6 ] = clipPlane.y;
projectionMatrix.elements[ 10 ] = ( renderer.coordinateSystem === WebGPUCoordinateSystem ) ? ( clipPlane.z - clipBias ) : ( clipPlane.z + 1.0 - clipBias );
projectionMatrix.elements[ 14 ] = clipPlane.w;
//
this.textureNode.value = renderTarget.texture;
if ( this.depth === true ) {
this.textureNode.getDepthNode().value = renderTarget.depthTexture;
}
material.visible = false;
const currentRenderTarget = renderer.getRenderTarget();
const currentMRT = renderer.getMRT();
const currentAutoClear = renderer.autoClear;
renderer.setMRT( null );
renderer.setRenderTarget( renderTarget );
renderer.autoClear = true;
renderer.render( scene, virtualCamera );
renderer.setMRT( currentMRT );
renderer.setRenderTarget( currentRenderTarget );
renderer.autoClear = currentAutoClear;
material.visible = true;
_inReflector = false;
}
}
/**
 * TSL function for creating a reflector node.
 *
 * @function
 * @param {Object} [parameters={}] - An object holding configuration parameters.
 * @param {Object3D} [parameters.target=new Object3D()] - The 3D object the reflector is linked to.
 * @param {Number} [parameters.resolution=1] - The resolution scale.
 * @param {Boolean} [parameters.generateMipmaps=false] - Whether mipmaps should be generated or not.
 * @param {Boolean} [parameters.bounces=true] - Whether reflectors can render other reflector nodes or not.
 * @param {Boolean} [parameters.depth=false] - Whether depth data should be generated or not.
 * @param {TextureNode} [parameters.defaultTexture] - The default texture node.
 * @param {ReflectorBaseNode} [parameters.reflector] - The reflector base node.
 * @returns {ReflectorNode}
 */
const reflector = ( parameters ) => {

	return nodeObject( new ReflectorNode( parameters ) );

};
const _camera = /*@__PURE__*/ new OrthographicCamera( - 1, 1, 1, - 1, 0, 1 );
/**
 * A special geometry that fills the entire viewport with a single oversized
 * triangle instead of a two-triangle quad.
 *
 * Reference: {@link https://github.com/mrdoob/three.js/pull/21358}
 *
 * @private
 * @augments BufferGeometry
 */
class QuadGeometry extends BufferGeometry {

	/**
	 * Constructs a new quad geometry.
	 *
	 * @param {Boolean} [flipY=false] - Whether the uv coordinates should be flipped along the vertical axis or not.
	 */
	constructor( flipY = false ) {

		super();

		// triangle positions extend past the viewport so the visible area is fully covered
		const positions = [ - 1, 3, 0, - 1, - 1, 0, 3, - 1, 0 ];
		const uvs = ( flipY === false ) ? [ 0, - 1, 0, 1, 2, 1 ] : [ 0, 2, 0, 0, 2, 0 ];

		this.setAttribute( 'position', new Float32BufferAttribute( positions, 3 ) );
		this.setAttribute( 'uv', new Float32BufferAttribute( uvs, 2 ) );

	}

}
const _geometry = /*@__PURE__*/ new QuadGeometry();
/**
 * A helper for passes that render a full-screen effect, which is common
 * in the context of post processing.
 *
 * The intended usage is to reuse a single quad mesh for rendering
 * subsequent passes by just reassigning the `material` reference.
 *
 * @augments Mesh
 */
class QuadMesh extends Mesh {

	/**
	 * Constructs a new quad mesh.
	 *
	 * @param {Material?} [material=null] - The material to render the quad mesh with.
	 */
	constructor( material = null ) {

		super( _geometry, material );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isQuadMesh = true;

		/**
		 * The camera to render the quad mesh with.
		 *
		 * @type {OrthographicCamera}
		 * @readonly
		 */
		this.camera = _camera;

	}

	/**
	 * Async version of `render()`.
	 *
	 * @async
	 * @param {Renderer} renderer - The renderer.
	 * @return {Promise} A Promise that resolves when the render has been finished.
	 */
	async renderAsync( renderer ) {

		return renderer.renderAsync( this, _camera );

	}

	/**
	 * Renders the quad mesh.
	 *
	 * @param {Renderer} renderer - The renderer.
	 */
	render( renderer ) {

		renderer.render( this, _camera );

	}

}
/** @module RTTNode **/
// Scratch vector for querying the renderer size without allocating per frame.
const _size$1 = /*@__PURE__*/ new Vector2();
/**
 * `RTTNode` takes another node and uses it with a `QuadMesh` to render into a texture (RTT).
 * This module is especially relevant in context of post processing where certain nodes require
 * texture input for their effects. With the helper function `convertToTexture()` which is based
 * on this module, the node system can automatically ensure texture input if required.
 *
 * @augments module:TextureNode~TextureNode
 */
class RTTNode extends TextureNode {
static get type() {
return 'RTTNode';
}
/**
 * Constructs a new RTT node.
 *
 * @param {Node} node - The node to render a texture with.
 * @param {Number?} [width=null] - The width of the internal render target. If no width is applied, the render target is automatically resized.
 * @param {Number?} [height=null] - The height of the internal render target.
 * @param {Object} [options={type:HalfFloatType}] - The options for the internal render target.
 */
constructor( node, width = null, height = null, options = { type: HalfFloatType } ) {
// the render target's texture becomes the value of this texture node
const renderTarget = new RenderTarget( width, height, options );
super( renderTarget.texture, uv() );
/**
 * The node to render a texture with.
 *
 * @type {Node}
 */
this.node = node;
/**
 * The width of the internal render target.
 * If no width is applied, the render target is automatically resized.
 *
 * @type {Number?}
 * @default null
 */
this.width = width;
/**
 * The height of the internal render target.
 *
 * @type {Number?}
 * @default null
 */
this.height = height;
/**
 * The pixel ratio
 *
 * @type {Number}
 * @default 1
 */
this.pixelRatio = 1;
/**
 * The render target
 *
 * @type {RenderTarget}
 */
this.renderTarget = renderTarget;
/**
 * Whether the texture requires an update or not.
 *
 * @type {Boolean}
 * @default true
 */
this.textureNeedsUpdate = true;
/**
 * Whether the texture should automatically be updated or not.
 *
 * @type {Boolean}
 * @default true
 */
this.autoUpdate = true;
/**
 * The node which is used with the quad mesh for RTT.
 *
 * @private
 * @type {Node}
 * @default null
 */
this._rttNode = null;
/**
 * The internal quad mesh for RTT.
 *
 * @private
 * @type {QuadMesh}
 */
this._quadMesh = new QuadMesh( new NodeMaterial() );
/**
 * The `updateBeforeType` is set to `NodeUpdateType.RENDER` since the node updates
 * the texture once per render in its {@link RTTNode#updateBefore} method.
 *
 * @type {String}
 * @default 'render'
 */
this.updateBeforeType = NodeUpdateType.RENDER;
}
/**
 * Whether the internal render target should automatically be resized or not.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
get autoSize() {
return this.width === null;
}
/**
 * Setups the node. Prepares the internal node in a shared context and
 * flags the quad mesh material for an update.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {Node} The result of the parent class' `setup()`.
 */
setup( builder ) {
this._rttNode = this.node.context( builder.getSharedContext() );
this._quadMesh.material.name = 'RTT';
this._quadMesh.material.needsUpdate = true;
return super.setup( builder );
}
/**
 * Sets the size of the internal render target
 *
 * @param {Number} width - The width to set.
 * @param {Number} height - The height to set.
 */
setSize( width, height ) {
this.width = width;
this.height = height;
const effectiveWidth = width * this.pixelRatio;
const effectiveHeight = height * this.pixelRatio;
this.renderTarget.setSize( effectiveWidth, effectiveHeight );
this.textureNeedsUpdate = true;
}
/**
 * Sets the pixel ratio. This will also resize the render target.
 *
 * @param {Number} pixelRatio - The pixel ratio to set.
 */
setPixelRatio( pixelRatio ) {
this.pixelRatio = pixelRatio;
this.setSize( this.width, this.height );
}
/**
 * Renders the internal node into the render target once per render,
 * unless `textureNeedsUpdate` and `autoUpdate` are both disabled.
 *
 * @param {NodeFrame} frame - A reference to the current node frame.
 */
updateBefore( { renderer } ) {
if ( this.textureNeedsUpdate === false && this.autoUpdate === false ) return;
this.textureNeedsUpdate = false;
// when auto-sizing, follow the renderer's current size and pixel ratio
if ( this.autoSize === true ) {
this.pixelRatio = renderer.getPixelRatio();
const size = renderer.getSize( _size$1 );
this.setSize( size.width, size.height );
}
// render the node's output into the render target via the quad mesh
this._quadMesh.material.fragmentNode = this._rttNode;
// save and restore the previous render target around the RTT pass
const currentRenderTarget = renderer.getRenderTarget();
renderer.setRenderTarget( this.renderTarget );
this._quadMesh.render( renderer );
renderer.setRenderTarget( currentRenderTarget );
}
/**
 * Clones the node as a plain texture node that references this RTT node.
 *
 * @return {TextureNode} The cloned texture node.
 */
clone() {
const newNode = new TextureNode( this.value, this.uvNode, this.levelNode );
newNode.sampler = this.sampler;
newNode.referenceNode = this;
return newNode;
}
}
/**
 * TSL function for creating a RTT node.
 *
 * @function
 * @param {Node} node - The node to render a texture with.
 * @param {Number?} [width=null] - The width of the internal render target. If no width is applied, the render target is automatically resized.
 * @param {Number?} [height=null] - The height of the internal render target.
 * @param {Object} [options={type:HalfFloatType}] - The options for the internal render target.
 * @returns {RTTNode}
 */
const rtt = ( node, ...params ) => {

	return nodeObject( new RTTNode( nodeObject( node ), ...params ) );

};
/**
 * TSL function for converting nodes to texture nodes. Texture nodes are
 * returned as-is, pass nodes resolve to their texture node, and any other
 * node is wrapped in a {@link RTTNode}.
 *
 * @function
 * @param {Node} node - The node to render a texture with.
 * @param {Number?} [width=null] - The width of the internal render target. If no width is applied, the render target is automatically resized.
 * @param {Number?} [height=null] - The height of the internal render target.
 * @param {Object} [options={type:HalfFloatType}] - The options for the internal render target.
 * @returns {RTTNode}
 */
const convertToTexture = ( node, ...params ) => {

	let textureNode;

	if ( node.isTextureNode ) {

		textureNode = node;

	} else if ( node.isPassNode ) {

		textureNode = node.getTextureNode();

	} else {

		textureNode = rtt( node, ...params );

	}

	return textureNode;

};
/** @module PostProcessingUtils **/
/**
 * Computes a position in view space based on a fragment's screen position expressed as uv coordinates, the fragment's
 * depth value and the camera's inverse projection matrix.
 *
 * @method
 * @param {Node<vec2>} screenPosition - The fragment's screen position expressed as uv coordinates.
 * @param {Node<float>} depth - The fragment's depth value.
 * @param {Node<mat4>} projectionMatrixInverse - The camera's inverse projection matrix.
 * @return {Node<vec3>} The fragment's position in view space.
 */
const getViewPosition = /*@__PURE__*/ Fn( ( [ screenPosition, depth, projectionMatrixInverse ], builder ) => {
let clipSpacePosition;
if ( builder.renderer.coordinateSystem === WebGPUCoordinateSystem ) {
// WebGPU: remap xy from [0,1] to [-1,1] (flipping y), depth is used as-is
screenPosition = vec2( screenPosition.x, screenPosition.y.oneMinus() ).mul( 2.0 ).sub( 1.0 );
clipSpacePosition = vec4( vec3( screenPosition, depth ), 1.0 );
} else {
// WebGL: remap all three components from [0,1] to [-1,1] (flipping y)
clipSpacePosition = vec4( vec3( screenPosition.x, screenPosition.y.oneMinus(), depth ).mul( 2.0 ).sub( 1.0 ), 1.0 );
}
// unproject into view space and apply the perspective divide
const viewSpacePosition = vec4( projectionMatrixInverse.mul( clipSpacePosition ) );
return viewSpacePosition.xyz.div( viewSpacePosition.w );
} );
/**
 * Computes a screen position expressed as uv coordinates based on a fragment's position in view space
 * and the camera's projection matrix.
 *
 * @method
 * @param {Node<vec3>} viewPosition - The fragment's position in view space.
 * @param {Node<mat4>} projectionMatrix - The camera's projection matrix.
 * @return {Node<vec2>} The fragment's screen position expressed as uv coordinates.
 */
const getScreenPosition = /*@__PURE__*/ Fn( ( [ viewPosition, projectionMatrix ] ) => {

	// project into clip space, then perspective-divide and remap from [-1,1] to [0,1]
	const clipPosition = projectionMatrix.mul( vec4( viewPosition, 1.0 ) );
	const uvCoord = clipPosition.xy.div( clipPosition.w ).mul( 0.5 ).add( 0.5 ).toVar();

	return vec2( uvCoord.x, uvCoord.y.oneMinus() );

} );
/**
 * Computes a normal vector based on depth data. Can be used as a fallback when no normal render
 * target is available or if flat surface normals are required.
 *
 * @method
 * @param {Node<vec2>} uv - The texture coordinate.
 * @param {DepthTexture} depthTexture - The depth texture.
 * @param {Node<mat4>} projectionMatrixInverse - The camera's inverse projection matrix.
 * @return {Node<vec3>} The computed normal vector.
 */
const getNormalFromDepth = /*@__PURE__*/ Fn( ( [ uv, depthTexture, projectionMatrixInverse ] ) => {
// texel coordinate of the fragment in the depth texture
const size = textureSize( textureLoad( depthTexture ) );
const p = ivec2( uv.mul( size ) ).toVar();
// center depth plus two neighboring depths on each side (left/right/bottom/top)
const c0 = textureLoad( depthTexture, p ).toVar();
const l2 = textureLoad( depthTexture, p.sub( ivec2( 2, 0 ) ) ).toVar();
const l1 = textureLoad( depthTexture, p.sub( ivec2( 1, 0 ) ) ).toVar();
const r1 = textureLoad( depthTexture, p.add( ivec2( 1, 0 ) ) ).toVar();
const r2 = textureLoad( depthTexture, p.add( ivec2( 2, 0 ) ) ).toVar();
const b2 = textureLoad( depthTexture, p.add( ivec2( 0, 2 ) ) ).toVar();
const b1 = textureLoad( depthTexture, p.add( ivec2( 0, 1 ) ) ).toVar();
const t1 = textureLoad( depthTexture, p.sub( ivec2( 0, 1 ) ) ).toVar();
const t2 = textureLoad( depthTexture, p.sub( ivec2( 0, 2 ) ) ).toVar();
// per-side extrapolation error: how well `2 * near - far` predicts the center depth
const dl = abs( sub( float( 2 ).mul( l1 ).sub( l2 ), c0 ) ).toVar();
const dr = abs( sub( float( 2 ).mul( r1 ).sub( r2 ), c0 ) ).toVar();
const db = abs( sub( float( 2 ).mul( b1 ).sub( b2 ), c0 ) ).toVar();
const dt = abs( sub( float( 2 ).mul( t1 ).sub( t2 ), c0 ) ).toVar();
// view-space position of the fragment itself
const ce = getViewPosition( uv, c0, projectionMatrixInverse ).toVar();
// view-space derivatives along x/y, taken towards the side with the smaller
// extrapolation error to avoid sampling across depth discontinuities
const dpdx = dl.lessThan( dr ).select( ce.sub( getViewPosition( uv.sub( vec2( float( 1 ).div( size.x ), 0 ) ), l1, projectionMatrixInverse ) ), ce.negate().add( getViewPosition( uv.add( vec2( float( 1 ).div( size.x ), 0 ) ), r1, projectionMatrixInverse ) ) );
const dpdy = db.lessThan( dt ).select( ce.sub( getViewPosition( uv.add( vec2( 0, float( 1 ).div( size.y ) ) ), b1, projectionMatrixInverse ) ), ce.negate().add( getViewPosition( uv.sub( vec2( 0, float( 1 ).div( size.y ) ) ), t1, projectionMatrixInverse ) ) );
// the normal is perpendicular to both surface derivatives
return normalize( cross( dpdx, dpdy ) );
} );
/**
 * A special type of instanced buffer attribute intended for compute shaders.
 * In earlier three.js versions it was only possible to update attribute data
 * on the CPU via JavaScript and then upload the data to the GPU. With the
 * new material system and renderer, compute shaders can now produce the
 * attribute data directly on the GPU.
 *
 * The idea is to create an instance of this class and provide it as an input
 * to {@link module:StorageBufferNode}.
 *
 * Note: This type of buffer attribute can only be used with `WebGPURenderer`.
 *
 * @augments InstancedBufferAttribute
 */
class StorageInstancedBufferAttribute extends InstancedBufferAttribute {

	/**
	 * Constructs a new storage instanced buffer attribute.
	 *
	 * @param {Number|TypedArray} count - The item count. It is also valid to pass a typed array as an argument.
	 * The subsequent parameters are then obsolete.
	 * @param {Number} itemSize - The item size.
	 * @param {TypedArray.constructor} [typeClass=Float32Array] - A typed array constructor.
	 */
	constructor( count, itemSize, typeClass = Float32Array ) {

		let array;

		if ( ArrayBuffer.isView( count ) ) {

			// a typed array was passed in directly
			array = count;

		} else {

			array = new typeClass( count * itemSize );

		}

		super( array, itemSize );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isStorageInstancedBufferAttribute = true;

	}

}
/**
 * A special type of buffer attribute intended for compute shaders.
 * In earlier three.js versions it was only possible to update attribute data
 * on the CPU via JavaScript and then upload the data to the GPU. With the
 * new material system and renderer, compute shaders can now produce the
 * attribute data directly on the GPU.
 *
 * The idea is to create an instance of this class and provide it as an input
 * to {@link module:StorageBufferNode}.
 *
 * Note: This type of buffer attribute can only be used with `WebGPURenderer`.
 *
 * @augments BufferAttribute
 */
class StorageBufferAttribute extends BufferAttribute {

	/**
	 * Constructs a new storage buffer attribute.
	 *
	 * @param {Number|TypedArray} count - The item count. It is also valid to pass a typed array as an argument.
	 * The subsequent parameters are then obsolete.
	 * @param {Number} itemSize - The item size.
	 * @param {TypedArray.constructor} [typeClass=Float32Array] - A typed array constructor.
	 */
	constructor( count, itemSize, typeClass = Float32Array ) {

		let array;

		if ( ArrayBuffer.isView( count ) ) {

			// a typed array was passed in directly
			array = count;

		} else {

			array = new typeClass( count * itemSize );

		}

		super( array, itemSize );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isStorageBufferAttribute = true;

	}

}
/** @module StorageArrayElementNode **/
/**
 * This class enables element access on instances of {@link StorageBufferNode}.
 * In most cases, it is indirectly used when accessing elements with the
 * {@link StorageBufferNode#element} method.
 *
 * ```js
 * const position = positionStorage.element( instanceIndex );
 * ```
 *
 * @augments ArrayElementNode
 */
class StorageArrayElementNode extends ArrayElementNode {
static get type() {
return 'StorageArrayElementNode';
}
/**
 * Constructs storage buffer element node.
 *
 * @param {StorageBufferNode} storageBufferNode - The storage buffer node.
 * @param {Node} indexNode - The index node that defines the element access.
 */
constructor( storageBufferNode, indexNode ) {
super( storageBufferNode, indexNode );
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isStorageArrayElementNode = true;
}
/**
 * The storage buffer node.
 *
 * @param {Node} value
 * @type {StorageBufferNode}
 */
set storageBufferNode( value ) {
this.node = value;
}
get storageBufferNode() {
return this.node;
}
/**
 * Returns the type of a struct member with the given name, or `'void'`
 * when the underlying storage buffer node has no struct layout.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @param {String} name - The struct member name.
 * @return {String} The member type.
 */
getMemberType( builder, name ) {
const structTypeNode = this.storageBufferNode.structTypeNode;
if ( structTypeNode ) {
return structTypeNode.getMemberType( builder, name );
}
return 'void';
}
/**
 * Setups the node. When storage buffers are not available and the storage
 * buffer node is flagged as a PBO, the PBO setup is triggered on the builder.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {Node} The result of the parent class' `setup()`.
 */
setup( builder ) {
if ( builder.isAvailable( 'storageBuffer' ) === false ) {
if ( this.node.isPBO === true ) {
builder.setupPBO( this.node );
}
}
return super.setup( builder );
}
/**
 * Generates the code snippet for the element access. When storage buffers
 * are not available, the access is emulated via a PBO or the buffer node itself.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @param {String} output - The expected output type.
 * @return {String} The generated code snippet.
 */
generate( builder, output ) {
let snippet;
const isAssignContext = builder.context.assign;
// storage buffer fallback path (e.g. WebGL backend)
if ( builder.isAvailable( 'storageBuffer' ) === false ) {
if ( this.node.isPBO === true && isAssignContext !== true && ( this.node.value.isInstancedBufferAttribute || builder.shaderStage !== 'compute' ) ) {
snippet = builder.generatePBO( this );
} else {
snippet = this.node.build( builder );
}
} else {
snippet = super.generate( builder );
}
if ( isAssignContext !== true ) {
const type = this.getNodeType( builder );
snippet = builder.format( snippet, type, output );
}
return snippet;
}
}
/**
 * TSL function for creating a storage element node. Usually not called
 * directly; use {@link StorageBufferNode#element} instead.
 *
 * @function
 * @param {StorageBufferNode} storageBufferNode - The storage buffer node.
 * @param {Node} indexNode - The index node that defines the element access.
 * @returns {StorageArrayElementNode}
 */
const storageElement = /*@__PURE__*/ nodeProxy( StorageArrayElementNode );
/** @module StorageBufferNode **/
/**
 * This node is used in context of compute shaders and allows to define a
 * storage buffer for data. A typical workflow is to create instances of
 * this node with the convenience functions `attributeArray()` or `instancedArray()`,
 * setup up a compute shader that writes into the buffers and then convert
 * the storage buffers to attribute nodes for rendering.
 *
 * ```js
 * const positionBuffer = instancedArray( particleCount, 'vec3' ); // the storage buffer node
 *
 * const computeInit = Fn( () => { // the compute shader
 *
 * const position = positionBuffer.element( instanceIndex );
 *
 * // compute position data
 *
 * position.x = 1;
 * position.y = 1;
 * position.z = 1;
 *
 * } )().compute( particleCount );
 *
 * const particleMaterial = new THREE.SpriteNodeMaterial();
 * particleMaterial.positionNode = positionBuffer.toAttribute();
 *
 * renderer.computeAsync( computeInit );
 *
 * ```
 *
 * @augments BufferNode
 */
class StorageBufferNode extends BufferNode {
static get type() {
return 'StorageBufferNode';
}
/**
 * Constructs a new storage buffer node.
 *
 * @param {StorageBufferAttribute|StorageInstancedBufferAttribute|BufferAttribute} value - The buffer data.
 * @param {(String|Struct)?} [bufferType=null] - The buffer type (e.g. `'vec3'`).
 * @param {Number} [bufferCount=0] - The buffer count.
 */
constructor( value, bufferType = null, bufferCount = 0 ) {
let nodeType, structTypeNode = null;
// a struct layout was passed in as the buffer type
if ( bufferType && bufferType.isStruct ) {
nodeType = 'struct';
structTypeNode = bufferType.layout;
// no explicit type: infer type and count from the storage attribute
} else if ( bufferType === null && ( value.isStorageBufferAttribute || value.isStorageInstancedBufferAttribute ) ) {
nodeType = getTypeFromLength( value.itemSize );
bufferCount = value.count;
} else {
nodeType = bufferType;
}
super( value, nodeType, bufferCount );
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isStorageBufferNode = true;
/**
 * The buffer struct type.
 *
 * @type {Node?}
 * @default null
 */
this.structTypeNode = structTypeNode;
/**
 * The access type of the texture node.
 *
 * @type {String}
 * @default 'readWrite'
 */
this.access = NodeAccess.READ_WRITE;
/**
 * Whether the node is atomic or not.
 *
 * @type {Boolean}
 * @default false
 */
this.isAtomic = false;
/**
 * Whether the node represents a PBO or not.
 * Only relevant for WebGL.
 *
 * @type {Boolean}
 * @default false
 */
this.isPBO = false;
/**
 * A reference to the internal buffer attribute node.
 *
 * @type {BufferAttributeNode?}
 * @default null
 */
this._attribute = null;
/**
 * A reference to the internal varying node.
 *
 * @type {VaryingNode?}
 * @default null
 */
this._varying = null;
/**
 * `StorageBufferNode` sets this property to `true` by default.
 *
 * @type {Boolean}
 * @default true
 */
this.global = true;
if ( value.isStorageBufferAttribute !== true && value.isStorageInstancedBufferAttribute !== true ) {
// TODO: Improve it, possibly adding a new property to the BufferAttribute to identify it as a storage buffer read-only attribute in Renderer
if ( value.isInstancedBufferAttribute ) value.isStorageInstancedBufferAttribute = true;
else value.isStorageBufferAttribute = true;
}
}
/**
 * This method is overwritten since the buffer data might be shared
 * and thus the hash should be shared as well.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The hash.
 */
getHash( builder ) {
if ( this.bufferCount === 0 ) {
let bufferData = builder.globalCache.getData( this.value );
if ( bufferData === undefined ) {
bufferData = {
node: this
};
builder.globalCache.setData( this.value, bufferData );
}
return bufferData.node.uuid;
}
return this.uuid;
}
/**
 * Overwrites the default implementation to return a fixed value `'indirectStorageBuffer'` or `'storageBuffer'`.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The input type.
 */
getInputType( /*builder*/ ) {
return this.value.isIndirectStorageBufferAttribute ? 'indirectStorageBuffer' : 'storageBuffer';
}
/**
 * Enables element access with the given index node.
 *
 * @param {IndexNode} indexNode - The index node.
 * @return {StorageArrayElementNode} A node representing the element access.
 */
element( indexNode ) {
return storageElement( this, indexNode );
}
/**
 * Defines whether this node is a PBO or not. Only relevant for WebGL.
 *
 * @param {Boolean} value - The value so set.
 * @return {StorageBufferNode} A reference to this node.
 */
setPBO( value ) {
this.isPBO = value;
return this;
}
/**
 * Returns the `isPBO` value.
 *
 * @return {Boolean} Whether the node represents a PBO or not.
 */
getPBO() {
return this.isPBO;
}
/**
 * Defines the node access.
 *
 * @param {String} value - The node access.
 * @return {StorageBufferNode} A reference to this node.
 */
setAccess( value ) {
this.access = value;
return this;
}
/**
 * Convenience method for configuring a read-only node access.
 *
 * @return {StorageBufferNode} A reference to this node.
 */
toReadOnly() {
return this.setAccess( NodeAccess.READ_ONLY );
}
/**
 * Defines whether the node is atomic or not.
 *
 * @param {Boolean} value - The atomic flag.
 * @return {StorageBufferNode} A reference to this node.
 */
setAtomic( value ) {
this.isAtomic = value;
return this;
}
/**
 * Convenience method for making this node atomic.
 *
 * @return {StorageBufferNode} A reference to this node.
 */
toAtomic() {
return this.setAtomic( true );
}
/**
 * Returns attribute data for this storage buffer node. The attribute and
 * varying nodes are created lazily on first access.
 *
 * @return {{attribute: BufferAttributeNode, varying: VaryingNode}} The attribute data.
 */
getAttributeData() {
if ( this._attribute === null ) {
this._attribute = bufferAttribute( this.value );
this._varying = varying( this._attribute );
}
return {
attribute: this._attribute,
varying: this._varying
};
}
/**
 * This method is overwritten since the node type is inferred from the
 * availability of storage buffers and the attribute data.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The node type.
 */
getNodeType( builder ) {
if ( this.structTypeNode !== null ) {
return this.structTypeNode.getNodeType( builder );
}
if ( builder.isAvailable( 'storageBuffer' ) || builder.isAvailable( 'indirectStorageBuffer' ) ) {
return super.getNodeType( builder );
}
const { attribute } = this.getAttributeData();
return attribute.getNodeType( builder );
}
/**
 * Generates the code snippet of the storage buffer node.
 *
 * @param {NodeBuilder} builder - The current node builder.
 * @return {String} The generated code snippet.
 */
generate( builder ) {
if ( this.structTypeNode !== null ) this.structTypeNode.build( builder );
if ( builder.isAvailable( 'storageBuffer' ) || builder.isAvailable( 'indirectStorageBuffer' ) ) {
return super.generate( builder );
}
// fallback: emulate the storage buffer via attribute + transform feedback varying
const { attribute, varying } = this.getAttributeData();
const output = varying.build( builder );
builder.registerTransform( output, attribute );
return output;
}
}
/**
 * TSL function for creating a storage buffer node.
 *
 * @function
 * @param {StorageBufferAttribute|StorageInstancedBufferAttribute|BufferAttribute} value - The buffer data.
 * @param {(String|Struct)?} [type=null] - The buffer type (e.g. `'vec3'`).
 * @param {Number} [count=0] - The buffer count.
 * @returns {StorageBufferNode}
 */
const storage = ( value, type = null, count = 0 ) => {

	return nodeObject( new StorageBufferNode( value, type, count ) );

};
/**
 * TSL function for creating a PBO-enabled storage buffer node.
 *
 * @function
 * @deprecated since r171. Use `storage().setPBO( true )` instead.
 *
 * @param {StorageBufferAttribute|StorageInstancedBufferAttribute|BufferAttribute} value - The buffer data.
 * @param {String?} type - The buffer type (e.g. `'vec3'`).
 * @param {Number} count - The buffer count.
 * @returns {StorageBufferNode} The storage buffer node with PBO enabled.
 */
const storageObject = ( value, type, count ) => { // @deprecated, r171
console.warn( 'THREE.TSL: "storageObject()" is deprecated. Use "storage().setPBO( true )" instead.' );
return storage( value, type, count ).setPBO( true );
};
/** @module Arrays **/
/**
 * TSL function for creating a storage buffer node with a configured `StorageBufferAttribute`.
 *
 * @function
 * @param {Number|TypedArray} count - The data count. It is also valid to pass a typed array as an argument.
 * @param {String|Struct} [type='float'] - The data type.
 * @returns {StorageBufferNode}
 */
const attributeArray = ( count, type = 'float' ) => {

	// struct layouts are backed by a float array sized to the layout's length
	const isStruct = ( type.isStruct === true );

	const itemSize = isStruct ? type.layout.getLength() : getLengthFromType( type );
	const typedArray = isStruct ? getTypedArrayFromType( 'float' ) : getTypedArrayFromType( type );

	const buffer = new StorageBufferAttribute( count, itemSize, typedArray );

	return storage( buffer, type, count );

};
/**
 * TSL function for creating a storage buffer node with a configured `StorageInstancedBufferAttribute`.
 *
 * @function
 * @param {Number|TypedArray} count - The data count. It is also valid to pass a typed array as an argument.
 * @param {String|Struct} [type='float'] - The data type.
 * @returns {StorageBufferNode}
 */
const instancedArray = ( count, type = 'float' ) => {

	// struct layouts are backed by a float array sized to the layout's length
	const isStruct = ( type.isStruct === true );

	const itemSize = isStruct ? type.layout.getLength() : getLengthFromType( type );
	const typedArray = isStruct ? getTypedArrayFromType( 'float' ) : getTypedArrayFromType( type );

	const buffer = new StorageInstancedBufferAttribute( count, itemSize, typedArray );

	return storage( buffer, type, count );

};
/** @module VertexColorNode **/
/**
 * An attribute node for representing vertex colors.
 *
 * @augments module:AttributeNode~AttributeNode
 */
class VertexColorNode extends AttributeNode {

	static get type() {

		return 'VertexColorNode';

	}

	/**
	 * Constructs a new vertex color node.
	 *
	 * @param {Number} [index=0] - The attribute index.
	 */
	constructor( index = 0 ) {

		super( null, 'vec4' );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isVertexColorNode = true;

		/**
		 * The attribute index to enable more than one sets of vertex colors.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.index = index;

	}

	/**
	 * Overwrites the default implementation by honoring the attribute index.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The attribute name.
	 */
	getAttributeName( /*builder*/ ) {

		const index = this.index;

		// index 0 maps to the plain 'color' attribute, higher indices get a suffix
		return ( index > 0 ) ? 'color' + index : 'color';

	}

	generate( builder ) {

		const attributeName = this.getAttributeName( builder );

		if ( builder.hasGeometryAttribute( attributeName ) === true ) {

			return super.generate( builder );

		}

		// Vertex color fallback should be white
		return builder.generateConst( this.nodeType, new Vector4( 1, 1, 1, 1 ) );

	}

	serialize( data ) {

		super.serialize( data );

		data.index = this.index;

	}

	deserialize( data ) {

		super.deserialize( data );

		this.index = data.index;

	}

}
/**
 * TSL function for creating a vertex color node.
 *
 * @function
 * @param {Number} [index=0] - The attribute index.
 * @returns {VertexColorNode}
 */
const vertexColor = ( index ) => nodeObject( new VertexColorNode( index ) );
/** @module PointUVNode **/
/**
* A node for representing the uv coordinates of points.
*
* Can only be used with a WebGL backend. In WebGPU, point
* primitives always have the size of one pixel and can thus
* can't be used as sprite-like objects that display textures.
*
* @augments Node
*/
class PointUVNode extends Node {

	static get type() {

		return 'PointUVNode';

	}

	/**
	 * Constructs a new point uv node.
	 */
	constructor() {

		// Point uv coordinates are two-dimensional.
		super( 'vec2' );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isPointUVNode = true;

	}

	/**
	 * Emits a GLSL snippet based on `gl_PointCoord` with a flipped v axis.
	 *
	 * @return {String} The generated code snippet.
	 */
	generate( /*builder*/ ) {

		return 'vec2( gl_PointCoord.x, 1.0 - gl_PointCoord.y )';

	}

}
/**
* TSL object that represents the uv coordinates of points.
*
* @type {PointUVNode}
*/
const pointUV = /*@__PURE__*/ nodeImmutable( PointUVNode );
// Scratch objects reused by SceneNode's `backgroundRotation` update callback
// to avoid per-frame allocations.
const _e1 = /*@__PURE__*/ new Euler();
const _m1 = /*@__PURE__*/ new Matrix4();
/** @module SceneNode **/
/**
* This module allows access to a collection of scene properties. The following predefined TSL objects
* are available for easier use:
*
* - `backgroundBlurriness`: A node that represents the scene's background blurriness.
* - `backgroundIntensity`: A node that represents the scene's background intensity.
* - `backgroundRotation`: A node that represents the scene's background rotation.
*
* @augments Node
*/
class SceneNode extends Node {

	static get type() {

		return 'SceneNode';

	}

	/**
	 * Constructs a new scene node.
	 *
	 * @param {('backgroundBlurriness'|'backgroundIntensity'|'backgroundRotation')} scope - The scope defines the type of scene property that is accessed.
	 * @param {Scene?} [scene=null] - A reference to the scene.
	 */
	constructor( scope = SceneNode.BACKGROUND_BLURRINESS, scene = null ) {

		super();

		/**
		 * The scope defines the type of scene property that is accessed.
		 *
		 * @type {('backgroundBlurriness'|'backgroundIntensity'|'backgroundRotation')}
		 */
		this.scope = scope;

		/**
		 * A reference to the scene that is going to be accessed.
		 * If `null`, the builder's scene is used instead.
		 *
		 * @type {Scene?}
		 * @default null
		 */
		this.scene = scene;

	}

	/**
	 * Depending on the scope, the method returns a different type of node that represents
	 * the respective scene property.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Node} The output node.
	 */
	setup( builder ) {

		const scope = this.scope;
		const scene = ( this.scene !== null ) ? this.scene : builder.scene;

		let output;

		switch ( scope ) {

			case SceneNode.BACKGROUND_BLURRINESS:

				output = reference( 'backgroundBlurriness', 'float', scene );
				break;

			case SceneNode.BACKGROUND_INTENSITY:

				output = reference( 'backgroundIntensity', 'float', scene );
				break;

			case SceneNode.BACKGROUND_ROTATION:

				// The rotation matrix is recomputed once per render from the
				// scene's `backgroundRotation` Euler angles.
				output = uniform( 'mat4' ).label( 'backgroundRotation' ).setGroup( renderGroup ).onRenderUpdate( () => {

					const background = scene.background;

					if ( background !== null && background.isTexture && background.mapping !== UVMapping ) {

						_e1.copy( scene.backgroundRotation );

						// accommodate left-handed frame
						_e1.x *= - 1; _e1.y *= - 1; _e1.z *= - 1;

						_m1.makeRotationFromEuler( _e1 );

					} else {

						_m1.identity();

					}

					return _m1;

				} );
				break;

			default:

				console.error( 'THREE.SceneNode: Unknown scope:', scope );

		}

		return output;

	}

}
// Scope constants; the blurriness/intensity values double as the property
// names read from the referenced scene.
SceneNode.BACKGROUND_BLURRINESS = 'backgroundBlurriness';
SceneNode.BACKGROUND_INTENSITY = 'backgroundIntensity';
SceneNode.BACKGROUND_ROTATION = 'backgroundRotation';
/**
 * TSL object that represents the scene's background blurriness.
 *
 * @type {SceneNode}
 */
const backgroundBlurriness = /*@__PURE__*/ nodeImmutable( SceneNode, SceneNode.BACKGROUND_BLURRINESS );
/**
 * TSL object that represents the scene's background intensity.
 *
 * @type {SceneNode}
 */
const backgroundIntensity = /*@__PURE__*/ nodeImmutable( SceneNode, SceneNode.BACKGROUND_INTENSITY );
/**
 * TSL object that represents the scene's background rotation.
 *
 * @type {SceneNode}
 */
const backgroundRotation = /*@__PURE__*/ nodeImmutable( SceneNode, SceneNode.BACKGROUND_ROTATION );
/** @module StorageTextureNode **/
/**
* This special version of a texture node can be used to
* write data into a storage texture with a compute shader.
*
* ```js
* const storageTexture = new THREE.StorageTexture( width, height );
*
* const computeTexture = Fn( ( { storageTexture } ) => {
*
* const posX = instanceIndex.modInt( width );
* const posY = instanceIndex.div( width );
* const indexUV = uvec2( posX, posY );
*
* // generate RGB values
*
* const r = 1;
* const g = 1;
* const b = 1;
*
* textureStore( storageTexture, indexUV, vec4( r, g, b, 1 ) ).toWriteOnly();
*
* } );
*
* const computeNode = computeTexture( { storageTexture } ).compute( width * height );
* renderer.computeAsync( computeNode );
* ```
*
* This node can only be used with a WebGPU backend.
*
* @augments module:TextureNode~TextureNode
*/
class StorageTextureNode extends TextureNode {

	static get type() {

		return 'StorageTextureNode';

	}

	/**
	 * Constructs a new storage texture node.
	 *
	 * @param {StorageTexture} value - The storage texture.
	 * @param {Node<vec2|vec3>} uvNode - The uv node.
	 * @param {Node?} [storeNode=null] - The value node that should be stored in the texture.
	 */
	constructor( value, uvNode, storeNode = null ) {

		super( value, uvNode );

		/**
		 * The value node that should be stored in the texture.
		 *
		 * @type {Node?}
		 * @default null
		 */
		this.storeNode = storeNode;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isStorageTextureNode = true;

		/**
		 * The access type of the texture node.
		 *
		 * @type {String}
		 * @default 'writeOnly'
		 */
		this.access = NodeAccess.WRITE_ONLY;

	}

	/**
	 * Overwrites the default implementation to return a fixed value `'storageTexture'`.
	 *
	 * @return {String} The input type.
	 */
	getInputType( /*builder*/ ) {

		return 'storageTexture';

	}

	setup( builder ) {

		super.setup( builder );

		// Mirror the store node into the builder's node properties so that
		// `generateStore()` can pick it up alongside the resolved uv node.
		builder.getNodeProperties( this ).storeNode = this.storeNode;

	}

	/**
	 * Defines the node access.
	 *
	 * @param {String} value - The node access.
	 * @return {StorageTextureNode} A reference to this node.
	 */
	setAccess( value ) {

		this.access = value;

		return this;

	}

	/**
	 * Generates the code snippet of the storage node. If no `storeNode`
	 * is defined, the texture node is generated as a normal texture read.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {String} output - The current output.
	 * @return {String} The generated code snippet.
	 */
	generate( builder, output ) {

		if ( this.storeNode !== null ) {

			return this.generateStore( builder );

		}

		return super.generate( builder, output );

	}

	/**
	 * Convenience method for configuring a read/write node access.
	 *
	 * @return {StorageTextureNode} A reference to this node.
	 */
	toReadWrite() {

		return this.setAccess( NodeAccess.READ_WRITE );

	}

	/**
	 * Convenience method for configuring a read-only node access.
	 *
	 * @return {StorageTextureNode} A reference to this node.
	 */
	toReadOnly() {

		return this.setAccess( NodeAccess.READ_ONLY );

	}

	/**
	 * Convenience method for configuring a write-only node access.
	 *
	 * @return {StorageTextureNode} A reference to this node.
	 */
	toWriteOnly() {

		return this.setAccess( NodeAccess.WRITE_ONLY );

	}

	/**
	 * Generates the texture-store snippet and adds it to the line flow.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 */
	generateStore( builder ) {

		const { uvNode, storeNode } = builder.getNodeProperties( this );

		const textureProperty = super.generate( builder, 'property' );

		const uvSnippet = uvNode.build( builder, 'uvec2' );
		const storeSnippet = storeNode.build( builder, 'vec4' );

		const snippet = builder.generateTextureStore( builder, textureProperty, uvSnippet, storeSnippet );

		builder.addLineFlowCode( snippet, this );

	}

}
/**
 * TSL function for creating a storage texture node. See `textureStore()` for a
 * variant that additionally appends the node to the current flow when a store
 * node is provided.
 *
 * @function
 * @param {StorageTexture} value - The storage texture.
 * @param {Node<vec2|vec3>} uvNode - The uv node.
 * @param {Node?} [storeNode=null] - The value node that should be stored in the texture.
 * @returns {StorageTextureNode}
 */
const storageTexture = /*@__PURE__*/ nodeProxy( StorageTextureNode );
/**
 * TSL function for creating a storage texture node. Unlike `storageTexture()`,
 * this function also appends the node to the current flow when a store node is
 * provided, so the write is executed as part of the generated program.
 *
 * @function
 * @param {StorageTexture} value - The storage texture.
 * @param {Node<vec2|vec3>} uvNode - The uv node.
 * @param {Node?} [storeNode=null] - The value node that should be stored in the texture.
 * @returns {StorageTextureNode}
 */
const textureStore = ( value, uvNode, storeNode ) => {

	const node = storageTexture( value, uvNode, storeNode );

	// Only append when a store node was actually supplied. The previous check
	// ( storeNode !== null ) also appended when the argument was omitted
	// ( undefined ), adding a node without a store operation to the flow.
	if ( storeNode !== undefined && storeNode !== null ) node.append();

	return node;

};
/** @module UserDataNode **/
/**
* A special type of reference node that allows to link values in
* `userData` fields to node objects.
* ```js
* sprite.userData.rotation = 1; // stores individual rotation per sprite
*
* const material = new THREE.SpriteNodeMaterial();
* material.rotationNode = userData( 'rotation', 'float' );
* ```
* Since `UserDataNode` is extended from {@link module:ReferenceNode~ReferenceNode}, the node value
* will automatically be updated when the `rotation` user data field changes.
*
* @augments module:ReferenceNode~ReferenceNode
*/
class UserDataNode extends ReferenceNode {

	static get type() {

		return 'UserDataNode';

	}

	/**
	 * Constructs a new user data node.
	 *
	 * @param {String} property - The property name that should be referenced by the node.
	 * @param {String} inputType - The node data type of the reference.
	 * @param {Object?} [userData=null] - A reference to the `userData` object. If not provided, the `userData` property of the 3D object that uses the node material is evaluated.
	 */
	constructor( property, inputType, userData = null ) {

		super( property, inputType, userData );

		/**
		 * A reference to the `userData` object. If not provided, the `userData`
		 * property of the 3D object that uses the node material is evaluated.
		 *
		 * @type {Object?}
		 * @default null
		 */
		this.userData = userData;

	}

	/**
	 * Overwritten to make sure the reference points to the correct `userData` field:
	 * either the explicitly given object or the state object's own `userData`.
	 *
	 * @param {(NodeFrame|NodeBuilder)} state - The current state to evaluate.
	 * @return {Object} A reference to the `userData` field.
	 */
	updateReference( state ) {

		const source = ( this.userData !== null ) ? this.userData : state.object.userData;

		this.reference = source;

		return source;

	}

}
/**
 * TSL function for creating a user data node.
 *
 * @function
 * @param {String} name - The property name that should be referenced by the node.
 * @param {String} inputType - The node data type of the reference.
 * @param {Object?} [object=null] - A reference to the `userData` object. If not provided, the `userData` property of the 3D object that uses the node material is evaluated.
 * @returns {UserDataNode}
 */
// Note: the third parameter was renamed from `userData` to `object` to avoid
// shadowing this exported binding; the positional interface is unchanged.
const userData = ( name, inputType, object ) => nodeObject( new UserDataNode( name, inputType, object ) );
// Weakly-keyed per-object store used by VelocityNode for previous-frame
// matrices and per-camera frame data; entries are garbage-collected with
// their key objects.
const _objectData = new WeakMap();
/** @module VelocityNode **/
/**
* A node for representing motion or velocity vectors. Foundation
* for advanced post processing effects like motion blur or TRAA.
*
* The node keeps track of the model, view and projection matrices
* of the previous frame and uses them to compute offsets in NDC space.
* These offsets represent the final velocity.
*
* @augments TempNode
*/
class VelocityNode extends TempNode {
	static get type() {
		return 'VelocityNode';
	}
	/**
	 * Constructs a new velocity node.
	 */
	constructor() {
		super( 'vec2' );
		/**
		 * The projection matrix to use for the velocity computation.
		 * If `null`, the camera's projection matrix is used instead (see `setup()`).
		 *
		 * @type {Matrix4?}
		 * @default null
		 */
		this.projectionMatrix = null;
		/**
		 * Overwritten since velocity nodes are updated per object.
		 *
		 * @type {String}
		 * @default 'object'
		 */
		this.updateType = NodeUpdateType.OBJECT;
		/**
		 * Overwritten since velocity nodes save per-object data after the update.
		 *
		 * @type {String}
		 * @default 'object'
		 */
		this.updateAfterType = NodeUpdateType.OBJECT;
		/**
		 * Uniform node representing the previous model matrix in world space.
		 *
		 * @type {UniformNode<mat4>}
		 */
		this.previousModelWorldMatrix = uniform( new Matrix4() );
		/**
		 * Uniform node representing the previous projection matrix.
		 * Shared per render via the render group.
		 *
		 * @type {UniformNode<mat4>}
		 */
		this.previousProjectionMatrix = uniform( new Matrix4() ).setGroup( renderGroup );
		/**
		 * Uniform node representing the previous view matrix.
		 *
		 * @type {UniformNode<mat4>}
		 */
		this.previousCameraViewMatrix = uniform( new Matrix4() );
	}
	/**
	 * Sets the given projection matrix, overriding the camera's own.
	 *
	 * @param {Matrix4} projectionMatrix - The projection matrix to set.
	 */
	setProjectionMatrix( projectionMatrix ) {
		this.projectionMatrix = projectionMatrix;
	}
	/**
	 * Updates velocity specific uniforms.
	 *
	 * @param {NodeFrame} frame - A reference to the current node frame.
	 */
	update( { frameId, camera, object } ) {
		// The object's world matrix from the previous frame, saved by updateAfter().
		const previousModelMatrix = getPreviousMatrix( object );
		this.previousModelWorldMatrix.value.copy( previousModelMatrix );
		// Camera matrices only roll over once per frame, even though this
		// node updates once per object; the frameId guards repeated work.
		const cameraData = getData( camera );
		if ( cameraData.frameId !== frameId ) {
			cameraData.frameId = frameId;
			if ( cameraData.previousProjectionMatrix === undefined ) {
				// First frame for this camera: allocate storage and seed
				// "previous" with the current camera matrices.
				cameraData.previousProjectionMatrix = new Matrix4();
				cameraData.previousCameraViewMatrix = new Matrix4();
				cameraData.currentProjectionMatrix = new Matrix4();
				cameraData.currentCameraViewMatrix = new Matrix4();
				cameraData.previousProjectionMatrix.copy( this.projectionMatrix || camera.projectionMatrix );
				cameraData.previousCameraViewMatrix.copy( camera.matrixWorldInverse );
			} else {
				// Subsequent frames: last frame's "current" becomes "previous".
				cameraData.previousProjectionMatrix.copy( cameraData.currentProjectionMatrix );
				cameraData.previousCameraViewMatrix.copy( cameraData.currentCameraViewMatrix );
			}
			cameraData.currentProjectionMatrix.copy( this.projectionMatrix || camera.projectionMatrix );
			cameraData.currentCameraViewMatrix.copy( camera.matrixWorldInverse );
			this.previousProjectionMatrix.value.copy( cameraData.previousProjectionMatrix );
			this.previousCameraViewMatrix.value.copy( cameraData.previousCameraViewMatrix );
		}
	}
	/**
	 * Overwritten to save the object's world matrix so the next frame can
	 * use it as the "previous" model matrix.
	 *
	 * @param {NodeFrame} frame - A reference to the current node frame.
	 */
	updateAfter( { object } ) {
		getPreviousMatrix( object ).copy( object.matrixWorld );
	}
	/**
	 * Implements the velocity computation based on the previous and current vertex data.
	 *
	 * @return {Node<vec2>} The motion vector.
	 */
	setup( /*builder*/ ) {
		const projectionMatrix = ( this.projectionMatrix === null ) ? cameraProjectionMatrix : uniform( this.projectionMatrix );
		const previousModelViewMatrix = this.previousCameraViewMatrix.mul( this.previousModelWorldMatrix );
		const clipPositionCurrent = projectionMatrix.mul( modelViewMatrix ).mul( positionLocal );
		const clipPositionPrevious = this.previousProjectionMatrix.mul( previousModelViewMatrix ).mul( positionPrevious );
		// Perspective divide to NDC; the velocity is the NDC-space offset.
		const ndcPositionCurrent = clipPositionCurrent.xy.div( clipPositionCurrent.w );
		const ndcPositionPrevious = clipPositionPrevious.xy.div( clipPositionPrevious.w );
		const velocity = sub( ndcPositionCurrent, ndcPositionPrevious );
		return velocity;
	}
}
/**
 * Returns (lazily creating) the per-object data record stored in `_objectData`.
 *
 * @param {Object} object - The key object (3D object or camera).
 * @return {Object} The data record associated with the object.
 */
function getData( object ) {

	let data = _objectData.get( object );

	if ( data === undefined ) {

		data = {};
		_objectData.set( object, data );

	}

	return data;

}
/**
 * Returns (lazily creating) the previous-frame matrix stored for the given object.
 *
 * @param {Object} object - The key object.
 * @param {Number} [index=0] - Slot index, allowing more than one stored matrix per object.
 * @return {Matrix4} The stored matrix (identity when newly created).
 */
function getPreviousMatrix( object, index = 0 ) {

	const data = getData( object );

	let previousMatrix = data[ index ];

	if ( previousMatrix === undefined ) {

		previousMatrix = new Matrix4();
		data[ index ] = previousMatrix;

	}

	return previousMatrix;

}
/**
 * TSL object that represents the velocity of a render pass. Foundation for
 * post processing effects such as motion blur or TRAA.
 *
 * @type {VelocityNode}
 */
const velocity = /*@__PURE__*/ nodeImmutable( VelocityNode );
/** @module BlendModes **/
/**
* Represents a "Color Burn" blend mode.
*
* It's designed to darken the base layer's colors based on the color of the blend layer.
* It significantly increases the contrast of the base layer, making the colors more vibrant and saturated.
* The darker the color in the blend layer, the stronger the darkening and contrast effect on the base layer.
*
* @method
* @param {Node<vec3>} base - The base color.
* @param {Node<vec3>} blend - The blend color. A white (#ffffff) blend color does not alter the base color.
* @return {Node<vec3>} The result.
*/
const blendBurn = /*@__PURE__*/ Fn( ( [ base, blend ] ) => {

	// burn( b, s ) = 1 - min( 1, ( 1 - b ) / s )
	const ratio = base.oneMinus().div( blend );

	return min$1( 1.0, ratio ).oneMinus();

} ).setLayout( {
	name: 'blendBurn',
	type: 'vec3',
	inputs: [
		{ name: 'base', type: 'vec3' },
		{ name: 'blend', type: 'vec3' }
	]
} );
/**
* Represents a "Color Dodge" blend mode.
*
* It's designed to lighten the base layer's colors based on the color of the blend layer.
* It significantly increases the brightness of the base layer, making the colors lighter and more vibrant.
* The brighter the color in the blend layer, the stronger the lightening and contrast effect on the base layer.
*
* @method
* @param {Node<vec3>} base - The base color.
* @param {Node<vec3>} blend - The blend color. A black (#000000) blend color does not alter the base color.
* @return {Node<vec3>} The result.
*/
const blendDodge = /*@__PURE__*/ Fn( ( [ base, blend ] ) => {

	// dodge( b, s ) = min( b / ( 1 - s ), 1 )
	const ratio = base.div( blend.oneMinus() );

	return min$1( ratio, 1.0 );

} ).setLayout( {
	name: 'blendDodge',
	type: 'vec3',
	inputs: [
		{ name: 'base', type: 'vec3' },
		{ name: 'blend', type: 'vec3' }
	]
} );
/**
* Represents a "Screen" blend mode.
*
* Similar to `blendDodge()`, this mode also lightens the base layer's colors based on the color of the blend layer.
* The "Screen" blend mode is better for general brightening whereas the "Dodge" results in more subtle and nuanced
* effects.
*
* @method
* @param {Node<vec3>} base - The base color.
* @param {Node<vec3>} blend - The blend color. A black (#000000) blend color does not alter the base color.
* @return {Node<vec3>} The result.
*/
const blendScreen = /*@__PURE__*/ Fn( ( [ base, blend ] ) => {

	// screen( b, s ) = 1 - ( 1 - b ) * ( 1 - s )
	const invertedProduct = base.oneMinus().mul( blend.oneMinus() );

	return invertedProduct.oneMinus();

} ).setLayout( {
	name: 'blendScreen',
	type: 'vec3',
	inputs: [
		{ name: 'base', type: 'vec3' },
		{ name: 'blend', type: 'vec3' }
	]
} );
/**
* Represents a "Overlay" blend mode.
*
* It's designed to increase the contrast of the base layer based on the color of the blend layer.
* It amplifies the existing colors and contrast in the base layer, making lighter areas lighter and darker areas darker.
* The color of the blend layer significantly influences the resulting contrast and color shift in the base layer.
*
* @method
* @param {Node<vec3>} base - The base color.
* @param {Node<vec3>} blend - The blend color
* @return {Node<vec3>} The result.
*/
const blendOverlay = /*@__PURE__*/ Fn( ( [ base, blend ] ) => {

	// Multiply for dark base values, screen for bright ones,
	// selected per channel via step( 0.5, base ).
	const multiplyTerm = base.mul( 2.0 ).mul( blend );
	const screenTerm = base.oneMinus().mul( 2.0 ).mul( blend.oneMinus() ).oneMinus();

	return mix( multiplyTerm, screenTerm, step( 0.5, base ) );

} ).setLayout( {
	name: 'blendOverlay',
	type: 'vec3',
	inputs: [
		{ name: 'base', type: 'vec3' },
		{ name: 'blend', type: 'vec3' }
	]
} );
/**
 * This function blends two colors based on their alpha values by replicating the
 * behavior of `THREE.NormalBlending` ("over" compositing). It assumes both input
 * colors have non-premultiplied alpha.
 *
 * @method
 * @param {Node<vec4>} base - The base color.
 * @param {Node<vec4>} blend - The blend color
 * @return {Node<vec4>} The result.
 */
const blendColor = /*@__PURE__*/ Fn( ( [ base, blend ] ) => {

	const outAlpha = blend.a.add( base.a.mul( blend.a.oneMinus() ) );

	// Composite in premultiplied space, then divide by the output alpha
	// to return to straight alpha.
	const premultiplied = blend.rgb.mul( blend.a ).add( base.rgb.mul( base.a ).mul( blend.a.oneMinus() ) );

	return vec4( premultiplied.div( outAlpha ), outAlpha );

} ).setLayout( {
	name: 'blendColor',
	type: 'vec4',
	inputs: [
		{ name: 'base', type: 'vec4' },
		{ name: 'blend', type: 'vec4' }
	]
} );
// Deprecated
/**
 * @function
 * @deprecated since r171. Use {@link blendBurn} instead.
 *
 * @param {...any} params
 * @returns {Node<vec3>} The result of `blendBurn`.
 */
const burn = ( ...params ) => { // @deprecated, r171
console.warn( 'THREE.TSL: "burn" has been renamed. Use "blendBurn" instead.' );
return blendBurn( params );
};
/**
 * @function
 * @deprecated since r171. Use {@link blendDodge} instead.
 *
 * @param {...any} params
 * @returns {Node<vec3>} The result of `blendDodge`.
 */
const dodge = ( ...params ) => { // @deprecated, r171
console.warn( 'THREE.TSL: "dodge" has been renamed. Use "blendDodge" instead.' );
return blendDodge( params );
};
/**
 * @function
 * @deprecated since r171. Use {@link blendScreen} instead.
 *
 * @param {...any} params
 * @returns {Node<vec3>} The result of `blendScreen`.
 */
const screen = ( ...params ) => { // @deprecated, r171
console.warn( 'THREE.TSL: "screen" has been renamed. Use "blendScreen" instead.' );
return blendScreen( params );
};
/**
 * @function
 * @deprecated since r171. Use {@link blendOverlay} instead.
 *
 * @param {...any} params
 * @returns {Node<vec3>} The result of `blendOverlay`.
 */
const overlay = ( ...params ) => { // @deprecated, r171
console.warn( 'THREE.TSL: "overlay" has been renamed. Use "blendOverlay" instead.' );
return blendOverlay( params );
};
/** @module ColorAdjustment **/
/**
* Computes a grayscale value for the given RGB color value.
*
* @method
* @param {Node<vec3>} color - The color value to compute the grayscale for.
* @return {Node<vec3>} The grayscale color.
*/
const grayscale = /*@__PURE__*/ Fn( ( [ color ] ) => {

	// Luminance-weighted combination of the RGB channels.
	return luminance( color.rgb );

} );
/**
* Super-saturates or desaturates the given RGB color.
*
* @method
* @param {Node<vec3>} color - The input color.
* @param {Node<float>} [adjustment=1] - Specifies the amount of the conversion. A value under `1` desaturates the color, a value over `1` super-saturates it.
* @return {Node<vec3>} The saturated color.
*/
const saturation = /*@__PURE__*/ Fn( ( [ color, adjustment = float( 1 ) ] ) => {

	// Interpolate between the fully desaturated (luminance) color and the
	// original; `adjustment` > 1 extrapolates, super-saturating the result.
	const gray = luminance( color.rgb );

	return adjustment.mix( gray, color.rgb );

} );
/**
* Selectively enhance the intensity of less saturated RGB colors. Can result
* in a more natural and visually appealing image with enhanced color depth
* compared to {@link ColorAdjustment#saturation}.
*
* @method
* @param {Node<vec3>} color - The input color.
* @param {Node<float>} [adjustment=1] - Controls the intensity of the vibrance effect.
* @return {Node<vec3>} The updated color.
*/
const vibrance = /*@__PURE__*/ Fn( ( [ color, adjustment = float( 1 ) ] ) => {

	// The gap between the max channel and the channel average measures
	// saturation; less saturated colors receive a stronger boost.
	const channelAverage = add( color.r, color.g, color.b ).div( 3.0 );
	const maxChannel = color.r.max( color.g.max( color.b ) );
	const amount = maxChannel.sub( channelAverage ).mul( adjustment ).mul( - 3.0 );

	return mix( color.rgb, maxChannel, amount );

} );
/**
* Updates the hue component of the given RGB color while preserving its luminance and saturation.
*
* @method
* @param {Node<vec3>} color - The input color.
* @param {Node<float>} [adjustment=1] - Defines the degree of hue rotation in radians. A positive value rotates the hue clockwise, while a negative value rotates it counterclockwise.
* @return {Node<vec3>} The updated color.
*/
const hue = /*@__PURE__*/ Fn( ( [ color, adjustment = float( 1 ) ] ) => {

	// Rodrigues' rotation of the color vector around the achromatic axis
	// k = normalize( ( 1, 1, 1 ) ).
	const k = vec3( 0.57735, 0.57735, 0.57735 );
	const cosAngle = adjustment.cos();

	const crossTerm = k.cross( color.rgb ).mul( adjustment.sin() );
	const parallelTerm = k.mul( dot( k, color.rgb ).mul( cosAngle.oneMinus() ) );

	return vec3( color.rgb.mul( cosAngle ).add( crossTerm.add( parallelTerm ) ) );

} );
/**
* Computes the luminance for the given RGB color value.
*
* @method
* @param {Node<vec3>} color - The color value to compute the luminance for.
* @param {Node<vec3>?} luminanceCoefficients - The luminance coefficients. By default predefined values of the current working color space are used.
* @return {Node<vec3>} The luminance.
*/
const luminance = ( color, luminanceCoefficients = vec3( ColorManagement.getLuminanceCoefficients( new Vector3() ) ) ) => {

	// Dot product with the (working color space) luminance coefficients.
	return dot( color, luminanceCoefficients );

};
/**
* Color Decision List (CDL) v1.2
*
* Compact representation of color grading information, defined by slope, offset, power, and
* saturation. The CDL should be typically be given input in a log space (such as LogC, ACEScc,
* or AgX Log), and will return output in the same space. Output may require clamping >=0.
*
* @method
* @param {Node<vec4>} color Input (-Infinity < input < +Infinity)
* @param {Node<vec3>} slope Slope (0 ≤ slope < +Infinity)
* @param {Node<vec3>} offset Offset (-Infinity < offset < +Infinity; typically -1 < offset < 1)
* @param {Node<vec3>} power Power (0 < power < +Infinity)
* @param {Node<float>} saturation Saturation (0 ≤ saturation < +Infinity; typically 0 ≤ saturation < 4)
* @param {Node<vec3>} luminanceCoefficients Luminance coefficients for saturation term, typically Rec. 709
* @return {Node<vec4>} Output, -Infinity < output < +Infinity
*
* References:
* - ASC CDL v1.2
* - {@link https://blender.stackexchange.com/a/55239/43930}
* - {@link https://docs.acescentral.com/specifications/acescc/}
*/
const cdl = /*@__PURE__*/ Fn( ( [
	color,
	slope = vec3( 1 ),
	offset = vec3( 0 ),
	power = vec3( 1 ),
	saturation = float( 1 ),
	// ASC CDL v1.2 explicitly requires Rec. 709 luminance coefficients.
	luminanceCoefficients = vec3( ColorManagement.getLuminanceCoefficients( new Vector3(), LinearSRGBColorSpace ) )
] ) => {
	// NOTE: The ASC CDL v1.2 defines a [0, 1] clamp on the slope+offset term, and another on the
	// saturation term. Per the ACEScc specification and Filament, limits may be omitted to support
	// values outside [0, 1], requiring a workaround for negative values in the power expression.
	const luma = color.rgb.dot( vec3( luminanceCoefficients ) );
	// Slope/offset term, clamped at zero so the power below is well-defined.
	const v = max$1( color.rgb.mul( slope ).add( offset ), 0.0 ).toVar();
	const pv = v.pow( power ).toVar();
	// Apply the power only to strictly positive components (0^power is left as 0).
	If( v.r.greaterThan( 0.0 ), () => { v.r.assign( pv.r ); } ); // eslint-disable-line
	If( v.g.greaterThan( 0.0 ), () => { v.g.assign( pv.g ); } ); // eslint-disable-line
	If( v.b.greaterThan( 0.0 ), () => { v.b.assign( pv.b ); } ); // eslint-disable-line
	// Saturation: interpolate between the luma (fully desaturated) and the graded color.
	v.assign( luma.add( v.sub( luma ).mul( saturation ) ) );
	return vec4( v.rgb, color.a );
} );
/** @module PosterizeNode **/
/**
* Represents a posterize effect which reduces the number of colors
* in an image, resulting in a more blocky and stylized appearance.
*
* @augments TempNode
*/
class PosterizeNode extends TempNode {

	static get type() {

		return 'PosterizeNode';

	}

	/**
	 * Constructs a new posterize node.
	 *
	 * @param {Node} sourceNode - The input color.
	 * @param {Node} stepsNode - Controls the intensity of the posterization effect. A lower number results in a more blocky appearance.
	 */
	constructor( sourceNode, stepsNode ) {

		super();

		/**
		 * The input color.
		 *
		 * @type {Node}
		 */
		this.sourceNode = sourceNode;

		/**
		 * Controls the intensity of the posterization effect. A lower number results in a more blocky appearance.
		 *
		 * @type {Node}
		 */
		this.stepsNode = stepsNode;

	}

	setup() {

		// Quantize: scale to the step count, drop the fraction, scale back.
		return this.sourceNode.mul( this.stepsNode ).floor().div( this.stepsNode );

	}

}
/**
 * TSL function for creating a posterize node.
 *
 * @function
 * @param {Node} sourceNode - The input color.
 * @param {Node} stepsNode - Controls the intensity of the posterization effect. A lower number results in a more blocky appearance.
 * @returns {PosterizeNode}
 */
const posterize = /*@__PURE__*/ nodeProxy( PosterizeNode );
/** @module PassNode **/
// Module-level scratch vector, reused to avoid per-call allocations.
const _size = /*@__PURE__*/ new Vector2();
/**
* Represents the texture of a pass node.
*
* @augments module:TextureNode~TextureNode
*/
class PassTextureNode extends TextureNode {

	static get type() {

		return 'PassTextureNode';

	}

	/**
	 * Constructs a new pass texture node.
	 *
	 * @param {PassNode} passNode - The pass node.
	 * @param {Texture} texture - The output texture.
	 */
	constructor( passNode, texture ) {

		super( texture );

		/**
		 * A reference to the pass node.
		 *
		 * @type {PassNode}
		 */
		this.passNode = passNode;

		this.setUpdateMatrix( false );

	}

	setup( builder ) {

		// Make sure the pass itself is built when this texture is sampled
		// from a fullscreen quad.
		if ( builder.object.isQuadMesh ) {

			this.passNode.build( builder );

		}

		return super.setup( builder );

	}

	clone() {

		return new this.constructor( this.passNode, this.value );

	}

}
/**
* An extension of `PassTextureNode` which allows to manage more than one
* internal texture. Relevant for the `getPreviousTexture()` related API.
*
* @augments module:PassTextureNode~PassTextureNode
*/
class PassMultipleTextureNode extends PassTextureNode {

	static get type() {

		return 'PassMultipleTextureNode';

	}

	/**
	 * Constructs a new pass texture node.
	 *
	 * @param {PassNode} passNode - The pass node.
	 * @param {String} textureName - The output texture name.
	 * @param {Boolean} [previousTexture=false] - Whether previous frame data should be used or not.
	 */
	constructor( passNode, textureName, previousTexture = false ) {

		// null is passed to the super call since this class does not
		// use an external texture for rendering pass data into. Instead
		// the texture is managed by the pass node itself
		super( passNode, null );

		/**
		 * The output texture name.
		 *
		 * @type {String}
		 */
		this.textureName = textureName;

		/**
		 * Whether previous frame data should be used or not.
		 *
		 * @type {Boolean}
		 */
		this.previousTexture = previousTexture;

	}

	/**
	 * Updates the texture reference of this node by querying the pass node.
	 */
	updateTexture() {

		if ( this.previousTexture ) {

			this.value = this.passNode.getPreviousTexture( this.textureName );

		} else {

			this.value = this.passNode.getTexture( this.textureName );

		}

	}

	setup( builder ) {

		this.updateTexture();

		return super.setup( builder );

	}

	clone() {

		return new this.constructor( this.passNode, this.textureName, this.previousTexture );

	}

}
/**
 * Represents a render pass (sometimes called beauty pass) in context of post processing.
 * This pass produces a render for the given scene and camera and can provide multiple outputs
 * via MRT for further processing.
 *
 * ```js
 * const postProcessing = new PostProcessing( renderer );
 *
 * const scenePass = pass( scene, camera );
 *
 * postProcessing.outputNode = scenePass;
 * ```
 *
 * @augments TempNode
 */
class PassNode extends TempNode {
	static get type() {
		return 'PassNode';
	}
	/**
	 * Constructs a new pass node.
	 *
	 * @param {('color'|'depth')} scope - The scope of the pass. The scope determines whether the node outputs color or depth.
	 * @param {Scene} scene - A reference to the scene.
	 * @param {Camera} camera - A reference to the camera.
	 * @param {Object} options - Options for the internal render target.
	 */
	constructor( scope, scene, camera, options = {} ) {
		super( 'vec4' );
		/**
		 * The scope of the pass. The scope determines whether the node outputs color or depth.
		 *
		 * @type {('color'|'depth')}
		 */
		this.scope = scope;
		/**
		 * A reference to the scene.
		 *
		 * @type {Scene}
		 */
		this.scene = scene;
		/**
		 * A reference to the camera.
		 *
		 * @type {Camera}
		 */
		this.camera = camera;
		/**
		 * Options for the internal render target.
		 *
		 * @type {Object}
		 */
		this.options = options;
		/**
		 * The pass's pixel ratio. Will automatically be kept in sync with the renderer's pixel ratio.
		 *
		 * @private
		 * @type {Number}
		 * @default 1
		 */
		this._pixelRatio = 1;
		/**
		 * The pass's pixel width. Will automatically be kept in sync with the renderer's width.
		 * @private
		 * @type {Number}
		 * @default 1
		 */
		this._width = 1;
		/**
		 * The pass's pixel height. Will automatically be kept in sync with the renderer's height.
		 * @private
		 * @type {Number}
		 * @default 1
		 */
		this._height = 1;
		const depthTexture = new DepthTexture();
		depthTexture.isRenderTargetTexture = true;
		//depthTexture.type = FloatType;
		depthTexture.name = 'depth';
		const renderTarget = new RenderTarget( this._width * this._pixelRatio, this._height * this._pixelRatio, { type: HalfFloatType, ...options, } );
		renderTarget.texture.name = 'output';
		renderTarget.depthTexture = depthTexture;
		/**
		 * The pass's render target.
		 *
		 * @type {RenderTarget}
		 */
		this.renderTarget = renderTarget;
		/**
		 * A dictionary holding the internal result textures.
		 *
		 * @private
		 * @type {Object<String, Texture>}
		 */
		this._textures = {
			output: renderTarget.texture,
			depth: depthTexture
		};
		/**
		 * A dictionary holding the internal texture nodes.
		 *
		 * @private
		 * @type {Object<String, TextureNode>}
		 */
		this._textureNodes = {};
		/**
		 * A dictionary holding the internal depth nodes.
		 *
		 * @private
		 * @type {Object}
		 */
		this._linearDepthNodes = {};
		/**
		 * A dictionary holding the internal viewZ nodes.
		 *
		 * @private
		 * @type {Object}
		 */
		this._viewZNodes = {};
		/**
		 * A dictionary holding the texture data of the previous frame.
		 * Used for computing velocity/motion vectors.
		 *
		 * @private
		 * @type {Object<String, Texture>}
		 */
		this._previousTextures = {};
		/**
		 * A dictionary holding the texture nodes of the previous frame.
		 * Used for computing velocity/motion vectors.
		 *
		 * @private
		 * @type {Object<String, TextureNode>}
		 */
		this._previousTextureNodes = {};
		/**
		 * The `near` property of the camera as a uniform.
		 *
		 * @private
		 * @type {UniformNode}
		 */
		this._cameraNear = uniform( 0 );
		/**
		 * The `far` property of the camera as a uniform.
		 *
		 * @private
		 * @type {UniformNode}
		 */
		this._cameraFar = uniform( 0 );
		/**
		 * A MRT node configuring the MRT settings.
		 *
		 * @private
		 * @type {MRTNode?}
		 * @default null
		 */
		this._mrt = null;
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isPassNode = true;
		/**
		 * The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders the
		 * scene once per frame in its {@link PassNode#updateBefore} method.
		 *
		 * @type {String}
		 * @default 'frame'
		 */
		this.updateBeforeType = NodeUpdateType.FRAME;
	}
	/**
	 * Sets the given MRT node to setup MRT for this pass.
	 *
	 * @param {MRTNode} mrt - The MRT object.
	 * @return {PassNode} A reference to this pass.
	 */
	setMRT( mrt ) {
		this._mrt = mrt;
		return this;
	}
	/**
	 * Returns the current MRT node.
	 *
	 * @return {MRTNode} The current MRT node.
	 */
	getMRT() {
		return this._mrt;
	}
	/**
	 * The method is overwritten so it always returns `true`.
	 *
	 * @return {Boolean} Whether this node is global or not.
	 */
	isGlobal() {
		return true;
	}
	/**
	 * Returns the texture for the given output name. If no texture exists yet for
	 * the name, one is cloned from the render target's output texture and attached
	 * to the render target as an additional MRT attachment.
	 *
	 * @param {String} name - The output name to get the texture for.
	 * @return {Texture} The texture.
	 */
	getTexture( name ) {
		let texture = this._textures[ name ];
		if ( texture === undefined ) {
			const refTexture = this.renderTarget.texture;
			texture = refTexture.clone();
			texture.name = name;
			this._textures[ name ] = texture;
			this.renderTarget.textures.push( texture );
		}
		return texture;
	}
	/**
	 * Returns the texture holding the data of the previous frame for the given output name.
	 *
	 * @param {String} name - The output name to get the texture for.
	 * @return {Texture} The texture holding the data of the previous frame.
	 */
	getPreviousTexture( name ) {
		let texture = this._previousTextures[ name ];
		if ( texture === undefined ) {
			texture = this.getTexture( name ).clone();
			this._previousTextures[ name ] = texture;
		}
		return texture;
	}
	/**
	 * Switches current and previous textures for the given output name.
	 *
	 * @param {String} name - The output name.
	 */
	toggleTexture( name ) {
		const prevTexture = this._previousTextures[ name ];
		if ( prevTexture !== undefined ) {
			const texture = this._textures[ name ];
			// swap the render target attachment so the next render writes into
			// what was previously the history texture
			const index = this.renderTarget.textures.indexOf( texture );
			this.renderTarget.textures[ index ] = prevTexture;
			this._textures[ name ] = prevTexture;
			this._previousTextures[ name ] = texture;
			// keep the texture nodes pointing at the correct (swapped) textures
			this._textureNodes[ name ].updateTexture();
			this._previousTextureNodes[ name ].updateTexture();
		}
	}
	/**
	 * Returns the texture node for the given output name.
	 *
	 * @param {String} [name='output'] - The output name to get the texture node for.
	 * @return {TextureNode} The texture node.
	 */
	getTextureNode( name = 'output' ) {
		let textureNode = this._textureNodes[ name ];
		if ( textureNode === undefined ) {
			textureNode = nodeObject( new PassMultipleTextureNode( this, name ) );
			textureNode.updateTexture();
			this._textureNodes[ name ] = textureNode;
		}
		return textureNode;
	}
	/**
	 * Returns the previous texture node for the given output name.
	 *
	 * @param {String} [name='output'] - The output name to get the previous texture node for.
	 * @return {TextureNode} The previous texture node.
	 */
	getPreviousTextureNode( name = 'output' ) {
		let textureNode = this._previousTextureNodes[ name ];
		if ( textureNode === undefined ) {
			// ensure the current texture node exists so toggling stays consistent
			if ( this._textureNodes[ name ] === undefined ) this.getTextureNode( name );
			textureNode = nodeObject( new PassMultipleTextureNode( this, name, true ) );
			textureNode.updateTexture();
			this._previousTextureNodes[ name ] = textureNode;
		}
		return textureNode;
	}
	/**
	 * Returns a viewZ node of this pass.
	 *
	 * @param {String} [name='depth'] - The output name to get the viewZ node for. In most cases the default `'depth'` can be used however the parameter exists for custom depth outputs.
	 * @return {Node} The viewZ node.
	 */
	getViewZNode( name = 'depth' ) {
		let viewZNode = this._viewZNodes[ name ];
		if ( viewZNode === undefined ) {
			const cameraNear = this._cameraNear;
			const cameraFar = this._cameraFar;
			this._viewZNodes[ name ] = viewZNode = perspectiveDepthToViewZ( this.getTextureNode( name ), cameraNear, cameraFar );
		}
		return viewZNode;
	}
	/**
	 * Returns a linear depth node of this pass.
	 *
	 * @param {String} [name='depth'] - The output name to get the linear depth node for. In most cases the default `'depth'` can be used however the parameter exists for custom depth outputs.
	 * @return {Node} The linear depth node.
	 */
	getLinearDepthNode( name = 'depth' ) {
		let linearDepthNode = this._linearDepthNodes[ name ];
		if ( linearDepthNode === undefined ) {
			const cameraNear = this._cameraNear;
			const cameraFar = this._cameraFar;
			const viewZNode = this.getViewZNode( name );
			// TODO: just if ( builder.camera.isPerspectiveCamera )
			this._linearDepthNodes[ name ] = linearDepthNode = viewZToOrthographicDepth( viewZNode, cameraNear, cameraFar );
		}
		return linearDepthNode;
	}
	/**
	 * Syncs the render target's sample count and color buffer type with the renderer,
	 * then returns the node matching the pass's scope (color texture or linear depth).
	 *
	 * @param {NodeBuilder} builder - The current node builder (only `renderer` is used).
	 * @return {Node} The output node of this pass.
	 */
	setup( { renderer } ) {
		this.renderTarget.samples = this.options.samples === undefined ? renderer.samples : this.options.samples;
		// TODO: Disable MSAA for WebGL backend for now
		if ( renderer.backend.isWebGLBackend === true ) {
			this.renderTarget.samples = 0;
		}
		this.renderTarget.texture.type = renderer.getColorBufferType();
		return this.scope === PassNode.COLOR ? this.getTextureNode() : this.getLinearDepthNode();
	}
	/**
	 * Renders the scene into the internal render target once per frame. Keeps the
	 * pass's size and pixel ratio in sync with the renderer, updates the camera
	 * uniforms and swaps current/previous textures for history-based effects.
	 *
	 * @param {NodeFrame} frame - The current node frame.
	 */
	updateBefore( frame ) {
		const { renderer } = frame;
		const { scene, camera } = this;
		this._pixelRatio = renderer.getPixelRatio();
		const size = renderer.getSize( _size );
		this.setSize( size.width, size.height );
		const currentRenderTarget = renderer.getRenderTarget();
		const currentMRT = renderer.getMRT();
		this._cameraNear.value = camera.near;
		this._cameraFar.value = camera.far;
		for ( const name in this._previousTextures ) {
			this.toggleTexture( name );
		}
		renderer.setRenderTarget( this.renderTarget );
		renderer.setMRT( this._mrt );
		renderer.render( scene, camera );
		// restore the renderer state
		renderer.setRenderTarget( currentRenderTarget );
		renderer.setMRT( currentMRT );
	}
	/**
	 * Sets the size of the pass's render target. Honors the pixel ratio.
	 *
	 * @param {Number} width - The width to set.
	 * @param {Number} height - The height to set.
	 */
	setSize( width, height ) {
		this._width = width;
		this._height = height;
		const effectiveWidth = this._width * this._pixelRatio;
		const effectiveHeight = this._height * this._pixelRatio;
		this.renderTarget.setSize( effectiveWidth, effectiveHeight );
	}
	/**
	 * Sets the pixel ratio of the pass's render target and updates the size.
	 *
	 * @param {Number} pixelRatio - The pixel ratio to set.
	 */
	setPixelRatio( pixelRatio ) {
		this._pixelRatio = pixelRatio;
		this.setSize( this._width, this._height );
	}
	/**
	 * Frees internal resources. Should be called when the node is no longer in use.
	 */
	dispose() {
		this.renderTarget.dispose();
	}
}
PassNode.COLOR = 'color';
PassNode.DEPTH = 'depth';
/**
 * TSL function for creating a color pass node.
 *
 * @function
 * @param {Scene} scene - A reference to the scene.
 * @param {Camera} camera - A reference to the camera.
 * @param {Object} options - Options for the internal render target.
 * @returns {PassNode}
 */
const pass = ( scene, camera, options ) => {
	return nodeObject( new PassNode( PassNode.COLOR, scene, camera, options ) );
};
/**
 * TSL function for creating a pass texture node.
 *
 * @function
 * @param {PassNode} pass - The pass node.
 * @param {Texture} texture - The output texture.
 * @returns {PassTextureNode}
 */
const passTexture = ( pass, texture ) => {
	return nodeObject( new PassTextureNode( pass, texture ) );
};
/**
 * TSL function for creating a depth pass node.
 *
 * @function
 * @param {Scene} scene - A reference to the scene.
 * @param {Camera} camera - A reference to the camera.
 * @param {Object} options - Options for the internal render target.
 * @returns {PassNode}
 */
const depthPass = ( scene, camera, options ) => {
	return nodeObject( new PassNode( PassNode.DEPTH, scene, camera, options ) );
};
/** @module ToonOutlinePassNode **/
/**
 * Represents a render pass for producing a toon outline effect on compatible objects.
 * Only 3D objects with materials of type `MeshToonMaterial` and `MeshToonNodeMaterial`
 * will receive the outline.
 *
 * ```js
 * const postProcessing = new PostProcessing( renderer );
 *
 * const scenePass = toonOutlinePass( scene, camera );
 *
 * postProcessing.outputNode = scenePass;
 * ```
 * @augments PassNode
 */
class ToonOutlinePassNode extends PassNode {
	static get type() {
		return 'ToonOutlinePassNode';
	}
	/**
	 * Constructs a new outline pass node.
	 *
	 * @param {Scene} scene - A reference to the scene.
	 * @param {Camera} camera - A reference to the camera.
	 * @param {Node} colorNode - Defines the outline's color.
	 * @param {Node} thicknessNode - Defines the outline's thickness.
	 * @param {Node} alphaNode - Defines the outline's alpha.
	 */
	constructor( scene, camera, colorNode, thicknessNode, alphaNode ) {
		super( PassNode.COLOR, scene, camera );
		/**
		 * Defines the outline's color.
		 *
		 * @type {Node}
		 */
		this.colorNode = colorNode;
		/**
		 * Defines the outline's thickness.
		 *
		 * @type {Node}
		 */
		this.thicknessNode = thicknessNode;
		/**
		 * Defines the outline's alpha.
		 *
		 * @type {Node}
		 */
		this.alphaNode = alphaNode;
		/**
		 * An internal material cache.
		 *
		 * @private
		 * @type {WeakMap<Material, NodeMaterial>}
		 */
		this._materialCache = new WeakMap();
	}
	/**
	 * Temporarily replaces the renderer's render-object function so each toon
	 * object is drawn twice: once with the outline material, once normally.
	 * The previous render-object function is restored afterwards.
	 *
	 * @param {NodeFrame} frame - The current node frame.
	 */
	updateBefore( frame ) {
		const { renderer } = frame;
		const currentRenderObjectFunction = renderer.getRenderObjectFunction();
		renderer.setRenderObjectFunction( ( object, scene, camera, geometry, material, group, lightsNode, clippingContext ) => {
			// only render outline for supported materials
			if ( material.isMeshToonMaterial || material.isMeshToonNodeMaterial ) {
				if ( material.wireframe === false ) {
					const outlineMaterial = this._getOutlineMaterial( material );
					renderer.renderObject( object, scene, camera, geometry, outlineMaterial, group, lightsNode, clippingContext );
				}
			}
			// default
			renderer.renderObject( object, scene, camera, geometry, material, group, lightsNode, clippingContext );
		} );
		super.updateBefore( frame );
		renderer.setRenderObjectFunction( currentRenderObjectFunction );
	}
	/**
	 * Creates the material used for outline rendering. The outline is produced
	 * by rendering the back faces of the mesh, displaced along the (negated)
	 * local normal in clip space.
	 *
	 * @private
	 * @return {NodeMaterial} The outline material.
	 */
	_createMaterial() {
		const material = new NodeMaterial();
		material.isMeshToonOutlineMaterial = true;
		material.name = 'Toon_Outline';
		material.side = BackSide;
		// vertex node
		const outlineNormal = normalLocal.negate();
		const mvp = cameraProjectionMatrix.mul( modelViewMatrix );
		const ratio = float( 1.0 ); // TODO: support outline thickness ratio for each vertex
		const pos = mvp.mul( vec4( positionLocal, 1.0 ) );
		const pos2 = mvp.mul( vec4( positionLocal.add( outlineNormal ), 1.0 ) );
		const norm = normalize( pos.sub( pos2 ) ); // NOTE: subtract pos2 from pos because BackSide objectNormal is negative
		// displace proportionally to w so the outline thickness is stable in screen space
		material.vertexNode = pos.add( norm.mul( this.thicknessNode ).mul( pos.w ).mul( ratio ) );
		// color node
		material.colorNode = vec4( this.colorNode, this.alphaNode );
		return material;
	}
	/**
	 * For the given toon material, this method returns a corresponding
	 * outline material. Results are cached per source material.
	 *
	 * @private
	 * @param {(MeshToonMaterial|MeshToonNodeMaterial)} originalMaterial - The toon material.
	 * @return {NodeMaterial} The outline material.
	 */
	_getOutlineMaterial( originalMaterial ) {
		let outlineMaterial = this._materialCache.get( originalMaterial );
		if ( outlineMaterial === undefined ) {
			outlineMaterial = this._createMaterial();
			this._materialCache.set( originalMaterial, outlineMaterial );
		}
		return outlineMaterial;
	}
}
/**
 * TSL function for creating a toon outline pass node.
 *
 * @function
 * @param {Scene} scene - A reference to the scene.
 * @param {Camera} camera - A reference to the camera.
 * @param {Color} color - Defines the outline's color.
 * @param {Number} [thickness=0.003] - Defines the outline's thickness.
 * @param {Number} [alpha=1] - Defines the outline's alpha.
 * @returns {ToonOutlinePassNode}
 */
const toonOutlinePass = ( scene, camera, color = new Color( 0, 0, 0 ), thickness = 0.003, alpha = 1 ) => {
	return nodeObject( new ToonOutlinePassNode( scene, camera, nodeObject( color ), nodeObject( thickness ), nodeObject( alpha ) ) );
};
/** @module ToneMappingFunctions **/
/**
 * Linear tone mapping, exposure only.
 *
 * @method
 * @param {Node<vec3>} color - The color that should be tone mapped.
 * @param {Node<float>} exposure - The exposure.
 * @return {Node<vec3>} The tone mapped color.
 */
const linearToneMapping = /*@__PURE__*/ Fn( ( [ color, exposure ] ) => {
	// scale by exposure, then clamp into [0, 1]
	const exposed = color.mul( exposure );
	return exposed.clamp();
} ).setLayout( {
	name: 'linearToneMapping',
	type: 'vec3',
	inputs: [
		{ name: 'color', type: 'vec3' },
		{ name: 'exposure', type: 'float' }
	]
} );
/**
 * Reinhard tone mapping.
 *
 * Reference: {@link https://www.cs.utah.edu/docs/techreports/2002/pdf/UUCS-02-001.pdf}
 *
 * @method
 * @param {Node<vec3>} color - The color that should be tone mapped.
 * @param {Node<float>} exposure - The exposure.
 * @return {Node<vec3>} The tone mapped color.
 */
const reinhardToneMapping = /*@__PURE__*/ Fn( ( [ color, exposure ] ) => {
	// c / (1 + c) compresses the full HDR range into [0, 1)
	const exposed = color.mul( exposure );
	return exposed.div( exposed.add( 1.0 ) ).clamp();
} ).setLayout( {
	name: 'reinhardToneMapping',
	type: 'vec3',
	inputs: [
		{ name: 'color', type: 'vec3' },
		{ name: 'exposure', type: 'float' }
	]
} );
/**
 * Cineon tone mapping.
 *
 * Reference: {@link http://filmicworlds.com/blog/filmic-tonemapping-operators/}
 *
 * @method
 * @param {Node<vec3>} color - The color that should be tone mapped.
 * @param {Node<float>} exposure - The exposure.
 * @return {Node<vec3>} The tone mapped color.
 */
const cineonToneMapping = /*@__PURE__*/ Fn( ( [ color, exposure ] ) => {
	// filmic operator by Jim Hejl and Richard Burgess-Dawson
	const exposed = color.mul( exposure ).sub( 0.004 ).max( 0.0 );
	const numerator = exposed.mul( exposed.mul( 6.2 ).add( 0.5 ) );
	const denominator = exposed.mul( exposed.mul( 6.2 ).add( 1.7 ) ).add( 0.06 );
	// the 2.2 power folds an approximate sRGB gamma into the curve
	return numerator.div( denominator ).pow( 2.2 );
} ).setLayout( {
	name: 'cineonToneMapping',
	type: 'vec3',
	inputs: [
		{ name: 'color', type: 'vec3' },
		{ name: 'exposure', type: 'float' }
	]
} );
// source: https://github.com/selfshadow/ltc_code/blob/master/webgl/shaders/ltc/ltc_blit.fs
// Rational-polynomial fit of the ACES RRT + ODT curve.
const RRTAndODTFit = /*@__PURE__*/ Fn( ( [ color ] ) => {
	const numerator = color.mul( color.add( 0.0245786 ) ).sub( 0.000090537 );
	const denominator = color.mul( color.add( 0.4329510 ).mul( 0.983729 ) ).add( 0.238081 );
	return numerator.div( denominator );
} );
/**
 * ACESFilmic tone mapping.
 *
 * Reference: {@link https://github.com/selfshadow/ltc_code/blob/master/webgl/shaders/ltc/ltc_blit.fs}
 *
 * @method
 * @param {Node<vec3>} color - The color that should be tone mapped.
 * @param {Node<float>} exposure - The exposure.
 * @return {Node<vec3>} The tone mapped color.
 */
const acesFilmicToneMapping = /*@__PURE__*/ Fn( ( [ color, exposure ] ) => {
	// sRGB => XYZ => D65_2_D60 => AP1 => RRT_SAT
	const ACESInputMat = mat3(
		0.59719, 0.35458, 0.04823,
		0.07600, 0.90834, 0.01566,
		0.02840, 0.13383, 0.83777
	);
	// ODT_SAT => XYZ => D60_2_D65 => sRGB
	const ACESOutputMat = mat3(
		1.60475, - 0.53108, - 0.07367,
		- 0.10208, 1.10813, - 0.00605,
		- 0.00327, - 0.07276, 1.07602
	);
	// the 1/0.6 scale matches three.js' historical ACES exposure bias
	const exposed = color.mul( exposure ).div( 0.6 );
	const ap1 = ACESInputMat.mul( exposed );
	// Apply RRT and ODT
	const fitted = RRTAndODTFit( ap1 );
	const output = ACESOutputMat.mul( fitted );
	// Clamp to [0, 1]
	return output.clamp();
} ).setLayout( {
	name: 'acesFilmicToneMapping',
	type: 'vec3',
	inputs: [
		{ name: 'color', type: 'vec3' },
		{ name: 'exposure', type: 'float' }
	]
} );
// Color-space conversion matrices used by the AgX tone mapper below
// (mat3 built from column vectors).
const LINEAR_REC2020_TO_LINEAR_SRGB = /*@__PURE__*/ mat3( vec3( 1.6605, - 0.1246, - 0.0182 ), vec3( - 0.5876, 1.1329, - 0.1006 ), vec3( - 0.0728, - 0.0083, 1.1187 ) );
const LINEAR_SRGB_TO_LINEAR_REC2020 = /*@__PURE__*/ mat3( vec3( 0.6274, 0.0691, 0.0164 ), vec3( 0.3293, 0.9195, 0.0880 ), vec3( 0.0433, 0.0113, 0.8956 ) );
// 6th-order polynomial approximation of AgX's default contrast curve:
// 15.5 x^6 - 40.14 x^5 + 31.96 x^4 - 6.868 x^3 + 0.4298 x^2 + 0.1191 x - 0.00232
const agxDefaultContrastApprox = /*@__PURE__*/ Fn( ( [ x_immutable ] ) => {
	const x = vec3( x_immutable ).toVar();
	const x2 = vec3( x.mul( x ) ).toVar();
	const x4 = vec3( x2.mul( x2 ) ).toVar();
	return float( 15.5 ).mul( x4.mul( x2 ) ).sub( mul( 40.14, x4.mul( x ) ) ).add( mul( 31.96, x4 ).sub( mul( 6.868, x2.mul( x ) ) ).add( mul( 0.4298, x2 ).add( mul( 0.1191, x ).sub( 0.00232 ) ) ) );
} );
/**
 * AgX tone mapping.
 *
 * @method
 * @param {Node<vec3>} color - The color that should be tone mapped.
 * @param {Node<float>} exposure - The exposure.
 * @return {Node<vec3>} The tone mapped color.
 */
const agxToneMapping = /*@__PURE__*/ Fn( ( [ color, exposure ] ) => {
	const colortone = vec3( color ).toVar();
	const AgXInsetMatrix = mat3( vec3( 0.856627153315983, 0.137318972929847, 0.11189821299995 ), vec3( 0.0951212405381588, 0.761241990602591, 0.0767994186031903 ), vec3( 0.0482516061458583, 0.101439036467562, 0.811302368396859 ) );
	const AgXOutsetMatrix = mat3( vec3( 1.1271005818144368, - 0.1413297634984383, - 0.14132976349843826 ), vec3( - 0.11060664309660323, 1.157823702216272, - 0.11060664309660294 ), vec3( - 0.016493938717834573, - 0.016493938717834257, 1.2519364065950405 ) );
	// log2-exposure window mapped to [0, 1] below
	const AgxMinEv = float( - 12.47393 );
	const AgxMaxEv = float( 4.026069 );
	colortone.mulAssign( exposure );
	// work in Rec.2020 through the AgX "inset" gamut
	colortone.assign( LINEAR_SRGB_TO_LINEAR_REC2020.mul( colortone ) );
	colortone.assign( AgXInsetMatrix.mul( colortone ) );
	// avoid log2(0)
	colortone.assign( max$1( colortone, 1e-10 ) );
	colortone.assign( log2( colortone ) );
	// normalize log-encoded value into [0, 1]
	colortone.assign( colortone.sub( AgxMinEv ).div( AgxMaxEv.sub( AgxMinEv ) ) );
	colortone.assign( clamp( colortone, 0.0, 1.0 ) );
	// apply the sigmoid contrast curve
	colortone.assign( agxDefaultContrastApprox( colortone ) );
	colortone.assign( AgXOutsetMatrix.mul( colortone ) );
	// decode the 2.2 "gamma" baked into the curve, back to linear sRGB
	colortone.assign( pow( max$1( vec3( 0.0 ), colortone ), vec3( 2.2 ) ) );
	colortone.assign( LINEAR_REC2020_TO_LINEAR_SRGB.mul( colortone ) );
	colortone.assign( clamp( colortone, 0.0, 1.0 ) );
	return colortone;
} ).setLayout( {
	name: 'agxToneMapping',
	type: 'vec3',
	inputs: [
		{ name: 'color', type: 'vec3' },
		{ name: 'exposure', type: 'float' }
	]
} );
/**
 * Neutral tone mapping.
 *
 * Reference: {@link https://modelviewer.dev/examples/tone-mapping}
 *
 * @method
 * @param {Node<vec3>} color - The color that should be tone mapped.
 * @param {Node<float>} exposure - The exposure.
 * @return {Node<vec3>} The tone mapped color.
 */
const neutralToneMapping = /*@__PURE__*/ Fn( ( [ color, exposure ] ) => {
	const StartCompression = float( 0.8 - 0.04 );
	const Desaturation = float( 0.15 );
	color = color.mul( exposure );
	// lift dark colors: offset by the smallest channel (quadratic near black)
	const x = min$1( color.r, min$1( color.g, color.b ) );
	const offset = select( x.lessThan( 0.08 ), x.sub( mul( 6.25, x.mul( x ) ) ), 0.04 );
	color.subAssign( offset );
	const peak = max$1( color.r, max$1( color.g, color.b ) );
	// TSL early-return: colors below the compression threshold pass through unchanged
	If( peak.lessThan( StartCompression ), () => {
		return color;
	} );
	// compress the peak channel towards 1 and rescale the color accordingly
	const d = sub( 1, StartCompression );
	const newPeak = sub( 1, d.mul( d ).div( peak.add( d.sub( StartCompression ) ) ) );
	color.mulAssign( newPeak.div( peak ) );
	// desaturate highlights in proportion to how much the peak was compressed
	const g = sub( 1, div( 1, Desaturation.mul( peak.sub( newPeak ) ).add( 1 ) ) );
	return mix( color, vec3( newPeak ), g );
} ).setLayout( {
	name: 'neutralToneMapping',
	type: 'vec3',
	inputs: [
		{ name: 'color', type: 'vec3' },
		{ name: 'exposure', type: 'float' }
	]
} );
/** @module CodeNode **/
/**
 * This class represents native code sections. It is the base
 * class for modules like {@link FunctionNode} which allows to implement
 * functions with native shader languages.
 *
 * @augments Node
 */
class CodeNode extends Node {
	static get type() {
		return 'CodeNode';
	}
	/**
	 * Constructs a new code node.
	 *
	 * @param {String} [code=''] - The native code.
	 * @param {Array<Node>} [includes=[]] - An array of includes.
	 * @param {('js'|'wgsl'|'glsl')} [language=''] - The used language.
	 */
	constructor( code = '', includes = [], language = '' ) {
		super( 'code' );
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isCodeNode = true;
		/**
		 * The native code.
		 *
		 * @type {String}
		 * @default ''
		 */
		this.code = code;
		/**
		 * An array of includes
		 *
		 * @type {Array<Node>}
		 * @default []
		 */
		this.includes = includes;
		/**
		 * The used language.
		 *
		 * @type {('js'|'wgsl'|'glsl')}
		 * @default ''
		 */
		this.language = language;
	}
	/**
	 * The method is overwritten so it always returns `true`.
	 *
	 * @return {Boolean} Whether this node is global or not.
	 */
	isGlobal() {
		return true;
	}
	/**
	 * Sets the includes of this code node.
	 *
	 * @param {Array<Node>} includes - The includes to set.
	 * @return {CodeNode} A reference to this node.
	 */
	setIncludes( includes ) {
		this.includes = includes;
		return this;
	}
	/**
	 * Returns the includes of this code node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Array<Node>} The includes.
	 */
	getIncludes( /*builder*/ ) {
		return this.includes;
	}
	/**
	 * Builds all includes first, then registers this node's code with the builder.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The native code.
	 */
	generate( builder ) {
		const includes = this.getIncludes( builder );
		for ( const include of includes ) {
			include.build( builder );
		}
		const nodeCode = builder.getCodeFromNode( this, this.getNodeType( builder ) );
		nodeCode.code = this.code;
		return nodeCode.code;
	}
	/**
	 * Serializes the code and language into the given JSON object.
	 *
	 * @param {Object} data - The JSON object to serialize into.
	 */
	serialize( data ) {
		super.serialize( data );
		data.code = this.code;
		data.language = this.language;
	}
	/**
	 * Deserializes the code and language from the given JSON object.
	 *
	 * @param {Object} data - The JSON object to deserialize from.
	 */
	deserialize( data ) {
		super.deserialize( data );
		this.code = data.code;
		this.language = data.language;
	}
}
/**
 * TSL function for creating a code node.
 *
 * @function
 * @param {String} [code=''] - The native code.
 * @param {Array<Node>} [includes=[]] - An array of includes.
 * @param {('js'|'wgsl'|'glsl')} [language=''] - The used language.
 * @returns {CodeNode}
 */
const code = /*@__PURE__*/ nodeProxy( CodeNode );
/**
 * TSL function for creating a JS code node.
 *
 * @function
 * @param {String} src - The native code.
 * @param {Array<Node>} includes - An array of includes.
 * @returns {CodeNode}
 */
const js = ( src, includes ) => {
	return code( src, includes, 'js' );
};
/**
 * TSL function for creating a WGSL code node.
 *
 * @function
 * @param {String} src - The native code.
 * @param {Array<Node>} includes - An array of includes.
 * @returns {CodeNode}
 */
const wgsl = ( src, includes ) => {
	return code( src, includes, 'wgsl' );
};
/**
 * TSL function for creating a GLSL code node.
 *
 * @function
 * @param {String} src - The native code.
 * @param {Array<Node>} includes - An array of includes.
 * @returns {CodeNode}
 */
const glsl = ( src, includes ) => {
	return code( src, includes, 'glsl' );
};
/**
 * This class represents a native shader function. It can be used to implement
 * certain aspects of a node material with native shader code. There are two predefined
 * TSL functions for easier usage.
 *
 * - `wgslFn`: Creates a WGSL function node.
 * - `glslFn`: Creates a GLSL function node.
 *
 * A basic example with one include looks like so:
 *
 * ```js
 * const desaturateWGSLFn = wgslFn( `
 * fn desaturate( color:vec3<f32> ) -> vec3<f32> {
 * let lum = vec3<f32>( 0.299, 0.587, 0.114 );
 * return vec3<f32>( dot( lum, color ) );
 * }`
 *);
 * const someWGSLFn = wgslFn( `
 * fn someFn( color:vec3<f32> ) -> vec3<f32> {
 * return desaturate( color );
 * }
 * `, [ desaturateWGSLFn ] );
 * material.colorNode = someWGSLFn( { color: texture( map ) } );
 *```
 * @augments CodeNode
 */
class FunctionNode extends CodeNode {
	static get type() {
		return 'FunctionNode';
	}
	/**
	 * Constructs a new function node.
	 *
	 * @param {String} [code=''] - The native code.
	 * @param {Array<Node>} [includes=[]] - An array of includes.
	 * @param {('js'|'wgsl'|'glsl')} [language=''] - The used language.
	 */
	constructor( code = '', includes = [], language = '' ) {
		super( code, includes, language );
	}
	/**
	 * The node type is the return type of the parsed native function.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {
		return this.getNodeFunction( builder ).type;
	}
	/**
	 * Returns the inputs of this function node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Array<NodeFunctionInput>} The inputs.
	 */
	getInputs( builder ) {
		return this.getNodeFunction( builder ).inputs;
	}
	/**
	 * Returns the node function for this function node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {NodeFunction} The node function.
	 */
	getNodeFunction( builder ) {
		// the parsed function is cached in the builder's per-node data
		const nodeData = builder.getDataFromNode( this );
		let nodeFunction = nodeData.nodeFunction;
		if ( nodeFunction === undefined ) {
			nodeFunction = builder.parser.parseFunction( this.code );
			nodeData.nodeFunction = nodeFunction;
		}
		return nodeFunction;
	}
	/**
	 * Emits the native function code and returns either the function's property
	 * name or a formatted call expression, depending on the requested output.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @param {String} output - The requested output type.
	 * @return {String} The generated shader snippet.
	 */
	generate( builder, output ) {
		super.generate( builder );
		const nodeFunction = this.getNodeFunction( builder );
		const name = nodeFunction.name;
		const type = nodeFunction.type;
		const nodeCode = builder.getCodeFromNode( this, type );
		if ( name !== '' ) {
			// use a custom property name
			nodeCode.name = name;
		}
		const propertyName = builder.getPropertyName( nodeCode );
		const code = this.getNodeFunction( builder ).getCode( propertyName );
		nodeCode.code = code + '\n';
		if ( output === 'property' ) {
			return propertyName;
		} else {
			return builder.format( `${ propertyName }()`, type, output );
		}
	}
}
/**
 * Creates a callable TSL wrapper around a native shader function.
 *
 * Includes may be given either as nodes or as TSL function wrappers produced
 * by `glslFn`/`wgslFn`; the latter are resolved to their underlying
 * `FunctionNode`. The caller's `includes` array is no longer mutated in place
 * (the resolution result is collected into a new array).
 *
 * @param {String} code - The native code.
 * @param {Array<Node|Function>} [includes=[]] - An array of includes.
 * @param {('js'|'wgsl'|'glsl')} [language=''] - The used language.
 * @returns {Function} A callable that invokes the function node; the node
 * itself is exposed as `fn.functionNode`.
 */
const nativeFn = ( code, includes = [], language = '' ) => {
	// TSL Function: glslFn, wgslFn — unwrap to the underlying FunctionNode
	const resolvedIncludes = includes.map( ( include ) =>
		typeof include === 'function' ? include.functionNode : include
	);
	const functionNode = nodeObject( new FunctionNode( code, resolvedIncludes, language ) );
	const fn = ( ...params ) => functionNode.call( ...params );
	fn.functionNode = functionNode;
	return fn;
};
/**
 * TSL function for creating a GLSL function node.
 *
 * @param {String} code - The native GLSL code.
 * @param {Array<Node|Function>} includes - An array of includes.
 * @returns {Function} The callable TSL wrapper.
 */
const glslFn = ( code, includes ) => nativeFn( code, includes, 'glsl' );
/**
 * TSL function for creating a WGSL function node.
 *
 * @param {String} code - The native WGSL code.
 * @param {Array<Node|Function>} includes - An array of includes.
 * @returns {Function} The callable TSL wrapper.
 */
const wgslFn = ( code, includes ) => nativeFn( code, includes, 'wgsl' );
/** @module ScriptableValueNode **/
/**
 * `ScriptableNode` uses this class to manage script inputs and outputs.
 *
 * @augments Node
 */
class ScriptableValueNode extends Node {
	static get type() {
		return 'ScriptableValueNode';
	}
	/**
	 * Constructs a new scriptable node.
	 *
	 * @param {Any} [value=null] - The value.
	 */
	constructor( value = null ) {
		super();
		/**
		 * A reference to the value.
		 *
		 * @private
		 * @default null
		 */
		this._value = value;
		/**
		 * Depending on the type of `_value`, this property might cache parsed data.
		 *
		 * @private
		 * @default null
		 */
		this._cache = null;
		/**
		 * If this node represents an input, this property represents the input type.
		 *
		 * @type {String?}
		 * @default null
		 */
		this.inputType = null;
		/**
		 * If this node represents an output, this property represents the output type.
		 *
		 * @type {String?}
		 * @default null
		 */
		this.outputType = null;
		/**
		 * An event dispatcher for managing events.
		 *
		 * @type {EventDispatcher}
		 */
		this.events = new EventDispatcher();
		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isScriptableValueNode = true;
	}
	/**
	 * Whether this node represents an output or not.
	 *
	 * @type {Boolean}
	 * @readonly
	 * @default true
	 */
	get isScriptableOutputNode() {
		return this.outputType !== null;
	}
	/**
	 * Sets the node's value. Releases a previously created object URL (if any)
	 * and dispatches `change`/`refresh` events.
	 *
	 * @param {Any} val - The new value.
	 */
	set value( val ) {
		if ( this._value === val ) return;
		if ( this._cache && this.inputType === 'URL' && this.value.value instanceof ArrayBuffer ) {
			// free the object URL created in getValue() for the old buffer
			URL.revokeObjectURL( this._cache );
			this._cache = null;
		}
		this._value = val;
		this.events.dispatchEvent( { type: 'change' } );
		this.refresh();
	}
	/**
	 * The node's value.
	 *
	 * @type {Any}
	 */
	get value() {
		return this._value;
	}
	/**
	 * Dispatches the `refresh` event.
	 */
	refresh() {
		this.events.dispatchEvent( { type: 'refresh' } );
	}
	/**
	 * The `value` property usually represents a node or even binary data in form of array buffers.
	 * In this case, this method tries to return the actual value behind the complex type.
	 *
	 * @return {Any} The value.
	 */
	getValue() {
		const value = this.value;
		if ( value && this._cache === null && this.inputType === 'URL' && value.value instanceof ArrayBuffer ) {
			// lazily create (and cache) an object URL for binary URL inputs
			this._cache = URL.createObjectURL( new Blob( [ value.value ] ) );
		} else if ( value && value.value !== null && value.value !== undefined && (
			( ( this.inputType === 'URL' || this.inputType === 'String' ) && typeof value.value === 'string' ) ||
			( this.inputType === 'Number' && typeof value.value === 'number' ) ||
			( this.inputType === 'Vector2' && value.value.isVector2 ) ||
			( this.inputType === 'Vector3' && value.value.isVector3 ) ||
			( this.inputType === 'Vector4' && value.value.isVector4 ) ||
			( this.inputType === 'Color' && value.value.isColor ) ||
			( this.inputType === 'Matrix3' && value.value.isMatrix3 ) ||
			( this.inputType === 'Matrix4' && value.value.isMatrix4 )
		) ) {
			return value.value;
		}
		return this._cache || value;
	}
	/**
	 * Overwritten since the node type is inferred from the value.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {
		return this.value && this.value.isNode ? this.value.getNodeType( builder ) : 'float';
	}
	/**
	 * Builds the wrapped node if the value is a node; falls back to a float node otherwise.
	 *
	 * @return {Node} The node to build.
	 */
	setup() {
		return this.value && this.value.isNode ? this.value : float();
	}
	/**
	 * Serializes the value (as base64 for array buffers, as UUID otherwise)
	 * plus input/output types into the given JSON object.
	 *
	 * @param {Object} data - The JSON object to serialize into.
	 */
	serialize( data ) {
		super.serialize( data );
		if ( this.value !== null ) {
			if ( this.inputType === 'ArrayBuffer' ) {
				data.value = arrayBufferToBase64( this.value );
			} else {
				data.value = this.value ? this.value.toJSON( data.meta ).uuid : null;
			}
		} else {
			data.value = null;
		}
		data.inputType = this.inputType;
		data.outputType = this.outputType;
	}
	/**
	 * Restores the value (resolving buffers, textures or nodes from the metadata)
	 * plus input/output types from the given JSON object.
	 *
	 * @param {Object} data - The JSON object to deserialize from.
	 */
	deserialize( data ) {
		super.deserialize( data );
		let value = null;
		if ( data.value !== null ) {
			if ( data.inputType === 'ArrayBuffer' ) {
				value = base64ToArrayBuffer( data.value );
			} else if ( data.inputType === 'Texture' ) {
				value = data.meta.textures[ data.value ];
			} else {
				value = data.meta.nodes[ data.value ] || null;
			}
		}
		this.value = value;
		this.inputType = data.inputType;
		this.outputType = data.outputType;
	}
}
/**
 * TSL function for creating a scriptable value node. Used by `ScriptableNode`
 * to wrap script inputs and outputs.
 *
 * @function
 * @param {Any} [value=null] - The value.
 * @returns {ScriptableValueNode}
 */
const scriptableValue = /*@__PURE__*/ nodeProxy( ScriptableValueNode );
/** @module ScriptableNode **/
/**
 * A Map-like data structure for managing resources of scriptable nodes.
 *
 * @augments Map
 */
class Resources extends Map {
	/**
	 * Returns the resource for the given key. If the key is missing and a
	 * factory callback is provided, the resource is created from the callback,
	 * stored and returned.
	 *
	 * @param {Any} key - The resource key.
	 * @param {Function?} [callback=null] - Optional factory invoked on a cache miss.
	 * @param {...Any} params - Arguments forwarded to the factory.
	 * @return {Any} The resource, or `undefined` on a miss without a factory.
	 */
	get( key, callback = null, ...params ) {
		if ( this.has( key ) === false && callback !== null ) {
			const resource = callback( ...params );
			this.set( key, resource );
			return resource;
		}
		return super.get( key );
	}
}
/**
 * The read-only view over a scriptable node's parameters that is exposed to
 * user scripts.
 */
class Parameters {
	constructor( scriptableNode ) {
		// back-reference to the owning scriptable node
		this.scriptableNode = scriptableNode;
	}
	get parameters() {
		return this.scriptableNode.parameters;
	}
	get layout() {
		return this.scriptableNode.getLayout();
	}
	getInputLayout( id ) {
		return this.scriptableNode.getInputLayout( id );
	}
	/**
	 * Resolves the parameter with the given name to its actual value.
	 *
	 * @param {String} name - The parameter name.
	 * @return {Any} The resolved value, or `null` when the parameter is unknown.
	 */
	get( name ) {
		const parameter = this.parameters[ name ];
		return parameter ? parameter.getValue() : null;
	}
}
/**
 * Defines the global resources (e.g. namespaces such as `TSL`) that are made
 * available to scripts of scriptable nodes.
 *
 * @type {Resources}
 */
const ScriptableNodeResources = new Resources();
/**
* This type of node allows to implement nodes with custom scripts. The script
* section is represented as an instance of `CodeNode` written with JavaScript.
* The script itself must adhere to a specific structure.
*
* - main(): Executed once by default and every time `node.needsUpdate` is set.
* - layout: The layout object defines the script's interface (inputs and outputs).
*
* ```js
* ScriptableNodeResources.set( 'TSL', TSL );
*
* const scriptableNode = scriptable( js( `
* layout = {
* outputType: 'node',
* elements: [
* { name: 'source', inputType: 'node' },
* ]
* };
*
* const { mul, oscSine } = TSL;
*
* function main() {
* const source = parameters.get( 'source' ) || float();
* return mul( source, oscSine() );
* }
*
* ` ) );
*
* scriptableNode.setParameter( 'source', color( 1, 0, 0 ) );
*
* const material = new THREE.MeshBasicNodeMaterial();
* material.colorNode = scriptableNode;
* ```
*
* @augments Node
*/
class ScriptableNode extends Node {
static get type() {
return 'ScriptableNode';
}
/**
 * Constructs a new scriptable node.
 *
 * @param {CodeNode?} [codeNode=null] - The code node.
 * @param {Object} [parameters={}] - The parameters definition.
 */
constructor( codeNode = null, parameters = {} ) {
super();
/**
 * The code node.
 *
 * @type {CodeNode?}
 * @default null
 */
this.codeNode = codeNode;
/**
 * The parameters definition.
 *
 * @type {Object}
 * @default {}
 */
this.parameters = parameters;
// Script-local resources; exposed to the script as `local`.
this._local = new Resources();
// The default output, holding the return value of the script's main().
this._output = scriptableValue();
// Named outputs created via setOutput().
this._outputs = {};
// Snapshot of the source; `needsUpdate` compares against it to detect edits.
this._source = this.source;
// Function compiled from the script source (lazily created in getMethod()).
this._method = null;
// Object returned by executing the compiled script (lazily created in getObject()).
this._object = null;
// Cached return value of the script's main().
this._value = null;
// Whether main() must run again on the next _exec() call.
this._needsOutputUpdate = true;
// Bound once so the same reference can be removed again in deleteParameter().
this.onRefresh = this.onRefresh.bind( this );
/**
 * This flag can be used for type testing.
 *
 * @type {Boolean}
 * @readonly
 * @default true
 */
this.isScriptableNode = true;
}
/**
 * The source code of the scriptable node.
 *
 * @type {String}
 */
get source() {
return this.codeNode ? this.codeNode.code : '';
}
/**
 * Sets the reference of a local script variable.
 *
 * @param {String} name - The variable name.
 * @param {Object} value - The reference to set.
 * @return {Resources} The resource map
 */
setLocal( name, value ) {
return this._local.set( name, value );
}
/**
 * Gets the value of a local script variable.
 *
 * @param {String} name - The variable name.
 * @return {Object} The value.
 */
getLocal( name ) {
return this._local.get( name );
}
/**
 * Event listener for the `refresh` event.
 */
onRefresh() {
this._refresh();
}
/**
 * Returns an input from the layout with the given id/name.
 *
 * @param {String} id - The id/name of the input.
 * @return {Object} The element entry.
 */
getInputLayout( id ) {
for ( const element of this.getLayout() ) {
if ( element.inputType && ( element.id === id || element.name === id ) ) {
return element;
}
}
}
/**
 * Returns an output from the layout with the given id/name.
 *
 * @param {String} id - The id/name of the output.
 * @return {Object} The element entry.
 */
getOutputLayout( id ) {
for ( const element of this.getLayout() ) {
if ( element.outputType && ( element.id === id || element.name === id ) ) {
return element;
}
}
}
/**
 * Defines a script output for the given name and value.
 *
 * @param {String} name - The name of the output.
 * @param {Node} value - The node value.
 * @return {ScriptableNode} A reference to this node.
 */
setOutput( name, value ) {
const outputs = this._outputs;
if ( outputs[ name ] === undefined ) {
outputs[ name ] = scriptableValue( value );
} else {
outputs[ name ].value = value;
}
return this;
}
/**
 * Returns a script output for the given name.
 *
 * @param {String} name - The name of the output.
 * @return {ScriptableValueNode} The node value.
 */
getOutput( name ) {
return this._outputs[ name ];
}
/**
 * Returns a parameter for the given name
 *
 * @param {String} name - The name of the parameter.
 * @return {ScriptableValueNode} The node value.
 */
getParameter( name ) {
return this.parameters[ name ];
}
/**
 * Sets a value for the given parameter name. Scriptable (value) nodes are
 * stored as-is; any other value is wrapped in a `ScriptableValueNode`. In
 * all cases this node subscribes to the parameter's `refresh` event.
 *
 * @param {String} name - The parameter name.
 * @param {Any} value - The parameter value.
 * @return {ScriptableNode} A reference to this node.
 */
setParameter( name, value ) {
const parameters = this.parameters;
if ( value && value.isScriptableNode ) {
this.deleteParameter( name );
parameters[ name ] = value;
parameters[ name ].getDefaultOutput().events.addEventListener( 'refresh', this.onRefresh );
} else if ( value && value.isScriptableValueNode ) {
this.deleteParameter( name );
parameters[ name ] = value;
parameters[ name ].events.addEventListener( 'refresh', this.onRefresh );
} else if ( parameters[ name ] === undefined ) {
parameters[ name ] = scriptableValue( value );
parameters[ name ].events.addEventListener( 'refresh', this.onRefresh );
} else {
parameters[ name ].value = value;
}
return this;
}
/**
 * Returns the value of this node which is the value of
 * the default output.
 *
 * @return {Node} The value.
 */
getValue() {
return this.getDefaultOutput().getValue();
}
/**
 * Detaches a parameter from the script. Note: only the `refresh` listener
 * is removed; the entry itself remains in `this.parameters`.
 *
 * @param {String} name - The parameter to remove.
 * @return {ScriptableNode} A reference to this node.
 */
deleteParameter( name ) {
let valueNode = this.parameters[ name ];
if ( valueNode ) {
if ( valueNode.isScriptableNode ) valueNode = valueNode.getDefaultOutput();
valueNode.events.removeEventListener( 'refresh', this.onRefresh );
}
return this;
}
/**
 * Deletes all parameters from the script and invalidates the cached state.
 *
 * @return {ScriptableNode} A reference to this node.
 */
clearParameters() {
for ( const name of Object.keys( this.parameters ) ) {
this.deleteParameter( name );
}
this.needsUpdate = true;
return this;
}
/**
 * Calls a function from the script.
 *
 * @param {String} name - The function name.
 * @param {...Any} params - A list of parameters.
 * @return {Any} The result of the function call.
 */
call( name, ...params ) {
const object = this.getObject();
const method = object[ name ];
if ( typeof method === 'function' ) {
return method( ...params );
}
}
/**
 * Asynchronously calls a function from the script.
 *
 * @param {String} name - The function name.
 * @param {...Any} params - A list of parameters.
 * @return {Promise<Any>} The result of the function call.
 */
async callAsync( name, ...params ) {
const object = this.getObject();
const method = object[ name ];
if ( typeof method === 'function' ) {
// only await functions declared with `async`; call others synchronously
return method.constructor.name === 'AsyncFunction' ? await method( ...params ) : method( ...params );
}
}
/**
 * Overwritten since the node type is inferred from the script's output.
 *
 * @param {NodeBuilder} builder - The current node builder
 * @return {String} The node type.
 */
getNodeType( builder ) {
return this.getDefaultOutputNode().getNodeType( builder );
}
/**
 * Refreshes the script node.
 *
 * @param {String?} [output=null] - An optional output.
 */
refresh( output = null ) {
if ( output !== null ) {
this.getOutput( output ).refresh();
} else {
this._refresh();
}
}
/**
 * Returns an object representation of the script. The compiled script
 * function is executed (once, then cached) and its `layout` — if defined —
 * is applied to this node's parameters and outputs.
 *
 * @return {Object} The result object.
 */
getObject() {
if ( this.needsUpdate ) this.dispose();
if ( this._object !== null ) return this._object;
// build the execution context passed into the compiled script
const refresh = () => this.refresh();
const setOutput = ( id, value ) => this.setOutput( id, value );
const parameters = new Parameters( this );
const THREE = ScriptableNodeResources.get( 'THREE' );
const TSL = ScriptableNodeResources.get( 'TSL' );
const method = this.getMethod();
const params = [ parameters, this._local, ScriptableNodeResources, refresh, setOutput, THREE, TSL ];
this._object = method( ...params );
const layout = this._object.layout;
if ( layout ) {
if ( layout.cache === false ) {
this._local.clear();
}
// default output
this._output.outputType = layout.outputType || null;
// sync input/output types declared by the layout elements
if ( Array.isArray( layout.elements ) ) {
for ( const element of layout.elements ) {
const id = element.id || element.name;
if ( element.inputType ) {
if ( this.getParameter( id ) === undefined ) this.setParameter( id, null );
this.getParameter( id ).inputType = element.inputType;
}
if ( element.outputType ) {
if ( this.getOutput( id ) === undefined ) this.setOutput( id, null );
this.getOutput( id ).outputType = element.outputType;
}
}
}
}
return this._object;
}
/**
 * Deserializes this node from the given data. Re-subscribes the `refresh`
 * listener on all restored parameters.
 *
 * @param {Object} data - The JSON data.
 */
deserialize( data ) {
super.deserialize( data );
for ( const name in this.parameters ) {
let valueNode = this.parameters[ name ];
if ( valueNode.isScriptableNode ) valueNode = valueNode.getDefaultOutput();
valueNode.events.addEventListener( 'refresh', this.onRefresh );
}
}
/**
 * Returns the layout of the script.
 *
 * @return {Object} The script's layout.
 */
getLayout() {
return this.getObject().layout;
}
/**
 * Returns default node output of the script.
 *
 * @return {Node} The default node output.
 */
getDefaultOutputNode() {
const output = this.getDefaultOutput().value;
if ( output && output.isNode ) {
return output;
}
// fallback when the script has not produced a node (yet)
return float();
}
/**
 * Returns default output of the script.
 *
 * @return {ScriptableValueNode} The default output.
 */
getDefaultOutput() {
return this._exec()._output;
}
/**
 * Returns a function created from the node's script. The source is wrapped
 * so the script can declare `layout`, `init`, `main` and `dispose`, which
 * are returned together with any properties assigned to `output`.
 *
 * NOTE(review): the source is compiled via `new Function` and runs with
 * full privileges — only use trusted script code.
 *
 * @return {Function} The function representing the node's code.
 */
getMethod() {
if ( this.needsUpdate ) this.dispose();
if ( this._method !== null ) return this._method;
// wrap the user source with the interface declarations
const parametersProps = [ 'parameters', 'local', 'global', 'refresh', 'setOutput', 'THREE', 'TSL' ];
const interfaceProps = [ 'layout', 'init', 'main', 'dispose' ];
const properties = interfaceProps.join( ', ' );
const declarations = 'var ' + properties + '; var output = {};\n';
const returns = '\nreturn { ...output, ' + properties + ' };';
const code = declarations + this.codeNode.code + returns;
// compile the wrapped script into a callable function
this._method = new Function( ...parametersProps, code );
return this._method;
}
/**
 * Frees all internal resources. Also invokes the script's own `dispose()`
 * function, if defined.
 */
dispose() {
if ( this._method === null ) return;
if ( this._object && typeof this._object.dispose === 'function' ) {
this._object.dispose();
}
this._method = null;
this._object = null;
this._source = null;
this._value = null;
this._needsOutputUpdate = true;
this._output.value = null;
this._outputs = {};
}
/**
 * Setups the node — it is represented by its default output node.
 *
 * @return {Node} The default output node.
 */
setup() {
return this.getDefaultOutputNode();
}
/**
 * Computes a cache key from the source code, the default output node and
 * all parameters.
 *
 * @param {Boolean} force - Whether to force the computation.
 * @return {Number} The cache key.
 */
getCacheKey( force ) {
const values = [ hashString( this.source ), this.getDefaultOutputNode().getCacheKey( force ) ];
for ( const param in this.parameters ) {
values.push( this.parameters[ param ].getCacheKey( force ) );
}
return hashArray( values );
}
// Setting `needsUpdate` to `true` disposes all cached script state;
// setting `false` has no effect.
set needsUpdate( value ) {
if ( value === true ) this.dispose();
}
// The node needs an update whenever the source code changed since the
// last compilation.
get needsUpdate() {
return this.source !== this._source;
}
/**
 * Executes the `main` function of the script (unless cached) and stores
 * the result in the default output.
 *
 * @private
 * @return {ScriptableNode} A reference to this node.
 */
_exec() {
if ( this.codeNode === null ) return this;
if ( this._needsOutputUpdate === true ) {
this._value = this.call( 'main' );
this._needsOutputUpdate = false;
}
this._output.value = this._value;
return this;
}
/**
 * Executes the refresh: invalidates the cached state, re-runs the script
 * and notifies listeners of the default output.
 *
 * @private
 */
_refresh() {
this.needsUpdate = true;
this._exec();
this._output.refresh();
}
}
/**
 * TSL function for creating a scriptable node.
 * See `ScriptableNode` for the structure the script source must follow.
 *
 * @function
 * @param {CodeNode?} [codeNode=null] - The code node.
 * @param {Object} [parameters={}] - The parameters definition.
 * @returns {ScriptableNode}
 */
const scriptable = /*@__PURE__*/ nodeProxy( ScriptableNode );
/** @module Fog **/
/**
* Returns a node that represents the `z` coordinate in view space
* for the current fragment. It's a different representation of the
* default depth value.
*
* This value can be part of a computation that defines how the fog
* density increases when moving away from the camera.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {Node} The viewZ node.
*/
function getViewZNode( builder ) {

	const { getViewZ } = builder.context;

	// a context may provide its own viewZ resolver; `this` is forwarded
	// unchanged (it is `undefined` when called as a plain function)
	const viewZ = ( getViewZ !== undefined ) ? getViewZ( this ) : undefined;

	return ( viewZ || positionView.z ).negate();

}
/**
 * Constructs a range fog factor node. The factor smoothly transitions
 * from 0 at `near` to 1 at `far`, based on the view-space depth.
 *
 * @function
 * @param {Node} near - Defines the near value.
 * @param {Node} far - Defines the far value.
 */
const rangeFogFactor = Fn( ( [ near, far ], builder ) => {

	return smoothstep( near, far, getViewZNode( builder ) );

} );
/**
 * Represents an exponential squared fog. This type of fog gives
 * a clear view near the camera and a faster than exponentially
 * densening fog farther from the camera.
 *
 * @function
 * @param {Node} density - Defines the fog density.
 */
const densityFogFactor = Fn( ( [ density ], builder ) => {

	const depth = getViewZNode( builder );

	// 1 - exp( -( density * depth )^2 )
	return density.mul( density, depth, depth ).negate().exp().oneMinus();

} );
/**
 * Mixes the fog color into the current output color according to the
 * given factor. Nodes of this type are assigned to `Scene.fogNode`.
 *
 * @function
 * @param {Node} color - Defines the color of the fog.
 * @param {Node} factor - Defines how the fog is factored in the scene.
 */
const fog = Fn( ( [ color, factor ] ) => {

	const fogFactor = factor.toFloat();

	return vec4( fogFactor.mix( output.rgb, color.toVec3() ), output.a );

} );
// Deprecated
/**
* @function
* @deprecated since r171. Use `fog( color, rangeFogFactor( near, far ) )` instead.
*
* @param {Node} color
* @param {Node} near
* @param {Node} far
* @returns {Function}
*/
function rangeFog( color, near, far ) { // @deprecated, r171

	console.warn( 'THREE.TSL: "rangeFog( color, near, far )" is deprecated. Use "fog( color, rangeFogFactor( near, far ) )" instead.' );

	// delegate to the replacement API
	const factor = rangeFogFactor( near, far );

	return fog( color, factor );

}
/**
* @function
* @deprecated since r171. Use `fog( color, densityFogFactor( density ) )` instead.
*
* @param {Node} color
* @param {Node} density
* @returns {Function}
*/
function densityFog( color, density ) { // @deprecated, r171

	console.warn( 'THREE.TSL: "densityFog( color, density )" is deprecated. Use "fog( color, densityFogFactor( density ) )" instead.' );

	// delegate to the replacement API
	const factor = densityFogFactor( density );

	return fog( color, factor );

}
/** @module RangeNode **/
// Shared scratch vectors for RangeNode#setup(); created lazily on first use
// and reused across all instances to avoid per-call allocations.
let min = null;
let max = null;
/**
* `RangeNode` generates random instanced attribute data in a defined range.
* An exemplary use case for this utility node is to generate random per-instance
* colors:
* ```js
* const material = new MeshBasicNodeMaterial();
* material.colorNode = range( new Color( 0x000000 ), new Color( 0xFFFFFF ) );
* const mesh = new InstancedMesh( geometry, material, count );
* ```
* @augments Node
*/
class RangeNode extends Node {

	static get type() {

		return 'RangeNode';

	}

	/**
	 * Constructs a new range node.
	 *
	 * @param {Node<any>} [minNode=float()] - A node defining the lower bound of the range.
	 * @param {Node<any>} [maxNode=float()] - A node defining the upper bound of the range.
	 */
	constructor( minNode = float(), maxNode = float() ) {

		super();

		/**
		 * A node defining the lower bound of the range.
		 *
		 * @type {Node<any>}
		 * @default float()
		 */
		this.minNode = minNode;

		/**
		 * A node defining the upper bound of the range.
		 *
		 * @type {Node<any>}
		 * @default float()
		 */
		this.maxNode = maxNode;

	}

	/**
	 * Returns the vector length which is computed based on the range definition.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Number} The vector length.
	 */
	getVectorLength( builder ) {

		const lowerLength = builder.getTypeLength( getValueType( this.minNode.value ) );
		const upperLength = builder.getTypeLength( getValueType( this.maxNode.value ) );

		// the wider of the two bounds determines the result type
		return ( lowerLength > upperLength ) ? lowerLength : upperLength;

	}

	/**
	 * This method is overwritten since the node type is inferred from the range definition.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		if ( builder.object.count > 1 ) {

			return builder.getTypeFromLength( this.getVectorLength( builder ) );

		}

		return 'float';

	}

	setup( builder ) {

		const object = builder.object;

		if ( object.count > 1 ) {

			const minValue = this.minNode.value;
			const maxValue = this.maxNode.value;
			const minLength = builder.getTypeLength( getValueType( minValue ) );
			const maxLength = builder.getTypeLength( getValueType( maxValue ) );

			// lazily create the module-level scratch vectors
			if ( min === null ) min = new Vector4();
			if ( max === null ) max = new Vector4();

			min.setScalar( 0 );
			max.setScalar( 0 );

			// normalize both bounds into vec4 scratch space
			if ( minLength === 1 ) min.setScalar( minValue );
			else if ( minValue.isColor ) min.set( minValue.r, minValue.g, minValue.b, 1 );
			else min.set( minValue.x, minValue.y, minValue.z || 0, minValue.w || 0 );

			if ( maxLength === 1 ) max.setScalar( maxValue );
			else if ( maxValue.isColor ) max.set( maxValue.r, maxValue.g, maxValue.b, 1 );
			else max.set( maxValue.x, maxValue.y, maxValue.z || 0, maxValue.w || 0 );

			// one random vec4 per instance, interpolated between the bounds
			const stride = 4;
			const length = stride * object.count;
			const array = new Float32Array( length );

			for ( let i = 0; i < length; i ++ ) {

				const component = i % stride;
				array[ i ] = MathUtils.lerp( min.getComponent( component ), max.getComponent( component ), Math.random() );

			}

			const nodeType = this.getNodeType( builder );

			if ( object.count <= 4096 ) {

				return buffer( array, 'vec4', object.count ).element( instanceIndex ).convert( nodeType );

			}

			// TODO: Improve anonymous buffer attribute creation removing this part
			const bufferAttribute = new InstancedBufferAttribute( array, 4 );
			builder.geometry.setAttribute( '__range' + this.id, bufferAttribute );

			return instancedBufferAttribute( bufferAttribute ).convert( nodeType );

		}

		// non-instanced objects resolve to a plain constant
		return float( 0 );

	}

}
/**
 * TSL function for creating a range node.
 * Note: only effective for instanced objects (`object.count > 1`);
 * otherwise the node resolves to `float( 0 )`.
 *
 * @function
 * @param {Node<any>} [minNode=float()] - A node defining the lower bound of the range.
 * @param {Node<any>} [maxNode=float()] - A node defining the upper bound of the range.
 * @returns {RangeNode}
 */
const range = /*@__PURE__*/ nodeProxy( RangeNode );
/** @module ComputeBuiltinNode **/
/**
* `ComputeBuiltinNode` represents a compute-scope builtin value that expose information
* about the currently running dispatch and/or the device it is running on.
*
* This node can only be used with a WebGPU backend.
*
* @augments Node
*/
class ComputeBuiltinNode extends Node {

	static get type() {

		return 'ComputeBuiltinNode';

	}

	/**
	 * Constructs a new compute builtin node.
	 *
	 * @param {String} builtinName - The built-in name.
	 * @param {String} nodeType - The node type.
	 */
	constructor( builtinName, nodeType ) {

		super( nodeType );

		/**
		 * The built-in name.
		 *
		 * @private
		 * @type {String}
		 */
		this._builtinName = builtinName;

	}

	/**
	 * This method is overwritten since the hash is derived from the built-in name.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The hash.
	 */
	getHash( builder ) {

		return this.getBuiltinName( builder );

	}

	/**
	 * This method is overwritten since the node type is simply derived from `nodeType`.
	 *
	 * @return {String} The node type.
	 */
	getNodeType( /*builder*/ ) {

		return this.nodeType;

	}

	/**
	 * Sets the builtin name.
	 *
	 * @param {String} builtinName - The built-in name.
	 * @return {ComputeBuiltinNode} A reference to this node.
	 */
	setBuiltinName( builtinName ) {

		this._builtinName = builtinName;

		return this;

	}

	/**
	 * Returns the builtin name.
	 *
	 * @return {String} The builtin name.
	 */
	getBuiltinName( /*builder*/ ) {

		return this._builtinName;

	}

	/**
	 * Returns whether the current node builder has the builtin or not.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {Boolean} Whether the builder provides the builtin.
	 */
	hasBuiltin( builder ) {

		// Bug fix: the query result was previously discarded, so this method
		// always returned `undefined` regardless of the builder's answer.
		return builder.hasBuiltin( this._builtinName );

	}

	generate( builder, output ) {

		const builtinName = this.getBuiltinName( builder );
		const nodeType = this.getNodeType( builder );

		if ( builder.shaderStage === 'compute' ) {

			return builder.format( builtinName, nodeType, output );

		} else {

			// builtins of this kind only exist in the compute stage; emit a
			// constant so the generated shader still compiles
			console.warn( `ComputeBuiltinNode: Compute built-in value ${builtinName} can not be accessed in the ${builder.shaderStage} stage` );
			return builder.generateConst( nodeType );

		}

	}

	serialize( data ) {

		super.serialize( data );

		data.global = this.global;
		data._builtinName = this._builtinName;

	}

	deserialize( data ) {

		super.deserialize( data );

		this.global = data.global;
		this._builtinName = data._builtinName;

	}

}
/**
 * TSL function for creating a compute builtin node.
 * Note: the resulting value is only accessible in the compute shader
 * stage (see `ComputeBuiltinNode#generate`).
 *
 * @function
 * @param {String} name - The built-in name.
 * @param {String} nodeType - The node type.
 * @returns {ComputeBuiltinNode}
 */
const computeBuiltin = ( name, nodeType ) => nodeObject( new ComputeBuiltinNode( name, nodeType ) );
/**
 * TSL function for creating a `numWorkgroups` builtin node.
 * Represents the number of workgroups dispatched by the compute shader.
 * ```js
 * // Run 512 invocations/threads with a workgroup size of 128.
 * const computeFn = Fn(() => {
 *
 * // numWorkgroups.x = 4
 * storageBuffer.element(0).assign(numWorkgroups.x)
 *
 * })().compute(512, [128]);
 *
 * // Run 512 invocations/threads with the default workgroup size of 64.
 * const computeFn = Fn(() => {
 *
 * // numWorkgroups.x = 8
 * storageBuffer.element(0).assign(numWorkgroups.x)
 *
 * })().compute(512);
 * ```
 *
 * @function
 * @returns {ComputeBuiltinNode<uvec3>}
 */
const numWorkgroups = /*@__PURE__*/ computeBuiltin( 'numWorkgroups', 'uvec3' );
/**
 * TSL function for creating a `workgroupId` builtin node.
 * Represents the 3-dimensional index of the workgroup the current compute invocation belongs to.
 * ```js
 * // Execute 12 compute threads with a workgroup size of 3.
 * const computeFn = Fn( () => {
 *
 * If( workgroupId.x.modInt( 2 ).equal( 0 ), () => {
 *
 * storageBuffer.element( instanceIndex ).assign( instanceIndex );
 *
 * } ).Else( () => {
 *
 * storageBuffer.element( instanceIndex ).assign( 0 );
 *
 * } );
 *
 * } )().compute( 12, [ 3 ] );
 *
 * // workgroupId.x = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3];
 * // Buffer Output = [0, 1, 2, 0, 0, 0, 6, 7, 8, 0, 0, 0];
 * ```
 *
 * @function
 * @returns {ComputeBuiltinNode<uvec3>}
 */
const workgroupId = /*@__PURE__*/ computeBuiltin( 'workgroupId', 'uvec3' );
/**
 * TSL function for creating a `globalId` builtin node. A non-linearized 3-dimensional
 * representation of the current invocation's position within a 3D global grid.
 *
 * @function
 * @returns {ComputeBuiltinNode<uvec3>}
 */
const globalId = /*@__PURE__*/ computeBuiltin( 'globalId', 'uvec3' );
/**
 * TSL function for creating a `localId` builtin node. A non-linearized 3-dimensional
 * representation of the current invocation's position within a 3D workgroup grid.
 *
 * @function
 * @returns {ComputeBuiltinNode<uvec3>}
 */
const localId = /*@__PURE__*/ computeBuiltin( 'localId', 'uvec3' );
/**
 * TSL function for creating a `subgroupSize` builtin node. A device dependent variable
 * that exposes the size of the current invocation's subgroup.
 *
 * @function
 * @returns {ComputeBuiltinNode<uint>}
 */
const subgroupSize = /*@__PURE__*/ computeBuiltin( 'subgroupSize', 'uint' );
/** @module BarrierNode **/
/**
* Represents a GPU control barrier that synchronizes compute operations within a given scope.
*
* This node can only be used with a WebGPU backend.
*
* @augments Node
*/
class BarrierNode extends Node {

	// Added for consistency with the other node classes in this file
	// (e.g. RangeNode, AtomicFunctionNode); returns the constructor name,
	// so the addition is behavior-neutral.
	static get type() {

		return 'BarrierNode';

	}

	/**
	 * Constructs a new barrier node.
	 *
	 * @param {String} scope - The scope defines the behavior of the node.
	 */
	constructor( scope ) {

		super();

		/**
		 * The barrier scope: 'workgroup', 'storage' or 'texture'.
		 *
		 * @type {String}
		 */
		this.scope = scope;

	}

	generate( builder ) {

		const { scope } = this;
		const { renderer } = builder;

		if ( renderer.backend.isWebGLBackend === true ) {

			// WebGL has no barrier intrinsics; emit a no-op comment so the
			// generated shader still documents the synchronization point
			builder.addFlowCode( `\t// ${scope}Barrier \n` );

		} else {

			builder.addLineFlowCode( `${scope}Barrier()`, this );

		}

	}

}
/**
 * TSL function for creating a barrier node.
 *
 * @function
 * @param {String} scope - The scope defines the behavior of the node.
 * @returns {BarrierNode}
 */
const barrier = /*@__PURE__*/ nodeProxy( BarrierNode );
/**
 * TSL function for creating a workgroup barrier. All compute shader
 * invocations must wait for each invocation within a workgroup to
 * complete before the barrier can be surpassed.
 *
 * @function
 * @returns {BarrierNode}
 */
const workgroupBarrier = () => barrier( 'workgroup' ).append();
/**
 * TSL function for creating a storage barrier. All invocations must
 * wait for each access to variables within the 'storage' address space
 * to complete before the barrier can be passed.
 *
 * @function
 * @returns {BarrierNode}
 */
const storageBarrier = () => barrier( 'storage' ).append();
/**
 * TSL function for creating a texture barrier. All invocations must
 * wait for each access to variables within the 'texture' address space
 * to complete before the barrier can be passed.
 *
 * @function
 * @returns {BarrierNode}
 */
const textureBarrier = () => barrier( 'texture' ).append();
/** @module WorkgroupInfoNode **/
/**
* Represents an element of a 'workgroup' scoped buffer.
*
* @augments ArrayElementNode
*/
class WorkgroupInfoElementNode extends ArrayElementNode {

	/**
	 * Constructs a new workgroup info element node.
	 *
	 * @param {Node} workgroupInfoNode - The workgroup info node.
	 * @param {Node} indexNode - The index node that defines the element access.
	 */
	constructor( workgroupInfoNode, indexNode ) {

		super( workgroupInfoNode, indexNode );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isWorkgroupInfoElementNode = true;

	}

	generate( builder, output ) {

		// capture the assignment flag before generating the element access
		const isAssignContext = builder.context.assign;

		let snippet = super.generate( builder );

		// only format reads; the target of an assignment must stay untouched
		if ( isAssignContext !== true ) {

			snippet = builder.format( snippet, this.getNodeType( builder ), output );

		}

		// TODO: Possibly activate clip distance index on index access rather than from clipping context

		return snippet;

	}

}
/**
* A node allowing the user to create a 'workgroup' scoped buffer within the
* context of a compute shader. Typically, workgroup scoped buffers are
* created to hold data that is transferred from a global storage scope into
* a local workgroup scope. For invocations within a workgroup, data
* access speeds on 'workgroup' scoped buffers can be significantly faster
* than similar access operations on globally accessible storage buffers.
*
* This node can only be used with a WebGPU backend.
*
* @augments Node
*/
class WorkgroupInfoNode extends Node {

	/**
	 * Constructs a new scoped buffer node.
	 *
	 * @param {String} scope - Scope prefix used for the generated array name and input type (e.g. 'Workgroup').
	 * @param {String} bufferType - The data type of a buffer element.
	 * @param {Number} [bufferCount=0] - The number of elements in the buffer.
	 */
	constructor( scope, bufferType, bufferCount = 0 ) {

		super( bufferType );

		/**
		 * The buffer type.
		 *
		 * @type {String}
		 */
		this.bufferType = bufferType;

		/**
		 * The buffer count.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.bufferCount = bufferCount;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isWorkgroupInfoNode = true;

		/**
		 * The data type of the array buffer.
		 *
		 * @type {String}
		 */
		this.elementType = bufferType;

		/**
		 * The scope prefix.
		 *
		 * @type {String}
		 */
		this.scope = scope;

	}

	/**
	 * Sets the name/label of this node.
	 *
	 * @param {String} name - The name to set.
	 * @return {WorkgroupInfoNode} A reference to this node.
	 */
	label( name ) {

		this.name = name;

		return this;

	}

	/**
	 * Sets the scope of this node.
	 *
	 * @param {String} scope - The scope to set.
	 * @return {WorkgroupInfoNode} A reference to this node.
	 */
	setScope( scope ) {

		this.scope = scope;

		return this;

	}

	/**
	 * The data type of the array buffer.
	 *
	 * @return {String} The element type.
	 */
	getElementType() {

		return this.elementType;

	}

	/**
	 * Overwrites the default implementation since the input type
	 * is inferred from the scope.
	 *
	 * @return {String} The input type.
	 */
	getInputType( /*builder*/ ) {

		return this.scope + 'Array';

	}

	/**
	 * This method can be used to access elements via an index node.
	 *
	 * @param {IndexNode} indexNode - indexNode.
	 * @return {WorkgroupInfoElementNode} A reference to an element.
	 */
	element( indexNode ) {

		return nodeObject( new WorkgroupInfoElementNode( this, indexNode ) );

	}

	generate( builder ) {

		// fall back to a unique generated name when no label was assigned
		const arrayName = this.name || `${ this.scope }Array_${ this.id }`;

		return builder.getScopedArray( arrayName, this.scope.toLowerCase(), this.bufferType, this.bufferCount );

	}

}
/**
 * TSL function for creating a workgroup info node.
 * Creates a new 'workgroup' scoped array buffer.
 * Note: when `count` is omitted it falls through to the node's default of `0`.
 *
 * @function
 * @param {String} type - The data type of a 'workgroup' scoped buffer element.
 * @param {Number} [count=0] - The number of elements in the buffer.
 * @returns {WorkgroupInfoNode}
 */
const workgroupArray = ( type, count ) => nodeObject( new WorkgroupInfoNode( 'Workgroup', type, count ) );
/** @module AtomicFunctionNode **/
/**
* `AtomicFunctionNode` represents any function that can operate on atomic variable types
* within a shader. In an atomic function, any modification to an atomic variable will
* occur as an indivisible step with a defined order relative to other modifications.
* Accordingly, even if multiple atomic functions are modifying an atomic variable at once
* atomic operations will not interfere with each other.
*
* This node can only be used with a WebGPU backend.
*
* @augments TempNode
*/
class AtomicFunctionNode extends TempNode {

	static get type() {

		return 'AtomicFunctionNode';

	}

	/**
	 * Constructs a new atomic function node.
	 *
	 * @param {String} method - The signature of the atomic function to construct.
	 * @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
	 * @param {Node} valueNode - The value that mutates the atomic variable.
	 * @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation.
	 */
	constructor( method, pointerNode, valueNode, storeNode = null ) {

		super( 'uint' );

		/**
		 * The signature of the atomic function to construct.
		 *
		 * @type {String}
		 */
		this.method = method;

		/**
		 * An atomic variable or element of an atomic buffer.
		 *
		 * @type {Node}
		 */
		this.pointerNode = pointerNode;

		/**
		 * A value that modifies the atomic variable.
		 *
		 * @type {Node}
		 */
		this.valueNode = valueNode;

		/**
		 * A variable storing the return value of the atomic operation,
		 * typically the value of the atomic variable before the operation.
		 *
		 * @type {Node?}
		 * @default null
		 */
		this.storeNode = storeNode;

	}

	/**
	 * The input type is taken from the pointer node.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The input type.
	 */
	getInputType( builder ) {

		return this.pointerNode.getNodeType( builder );

	}

	/**
	 * The node type matches the input type.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {String} The node type.
	 */
	getNodeType( builder ) {

		return this.getInputType( builder );

	}

	generate( builder ) {

		const type = this.getNodeType( builder );
		const inputType = this.getInputType( builder );

		// the first argument is always a pointer to the atomic variable
		const args = [ `&${ this.pointerNode.build( builder, inputType ) }` ];

		// some operations (e.g. atomicLoad) take no value argument
		if ( this.valueNode !== null ) {

			args.push( this.valueNode.build( builder, inputType ) );

		}

		const callSnippet = `${ builder.getMethod( this.method, type ) }( ${ args.join( ', ' ) } )`;

		if ( this.storeNode !== null ) {

			// capture the operation's return value in the store variable
			const storeSnippet = this.storeNode.build( builder, inputType );
			builder.addLineFlowCode( `${ storeSnippet } = ${ callSnippet }`, this );

		} else {

			builder.addLineFlowCode( callSnippet, this );

		}

	}

}
// Identifiers of the supported atomic operations. Each name is resolved to
// a backend-specific function via `builder.getMethod()` in
// AtomicFunctionNode#generate().
AtomicFunctionNode.ATOMIC_LOAD = 'atomicLoad';
AtomicFunctionNode.ATOMIC_STORE = 'atomicStore';
AtomicFunctionNode.ATOMIC_ADD = 'atomicAdd';
AtomicFunctionNode.ATOMIC_SUB = 'atomicSub';
AtomicFunctionNode.ATOMIC_MAX = 'atomicMax';
AtomicFunctionNode.ATOMIC_MIN = 'atomicMin';
AtomicFunctionNode.ATOMIC_AND = 'atomicAnd';
AtomicFunctionNode.ATOMIC_OR = 'atomicOr';
AtomicFunctionNode.ATOMIC_XOR = 'atomicXor';
/**
 * TSL function for creating an atomic function node.
 *
 * @function
 * @param {String} method - The signature of the atomic function to construct.
 * @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
 * @param {Node} valueNode - The value that mutates the atomic variable.
 * @param {Node?} [storeNode=null] - A variable storing the return value of an atomic operation, typically the value of the atomic variable before the operation.
 * @returns {AtomicFunctionNode}
 */
const atomicNode = /*@__PURE__*/ nodeProxy( AtomicFunctionNode );
/**
* TSL function for appending an atomic function call into the programmatic flow
* of a compute shader.
*
* @function
* @param {String} method - The signature of the atomic function to construct.
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of an atomic operation, typically the value of the atomic variable before the operation.
* @returns {AtomicFunctionNode}
*/
const atomicFunc = ( method, pointerNode, valueNode, storeNode = null ) => {
const fnNode = atomicNode( method, pointerNode, valueNode, storeNode );
fnNode.append(); // emit the call into the current code flow
return fnNode;
};
/**
* Loads the value stored in the atomic variable.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation.
* @returns {AtomicFunctionNode}
*/
const atomicLoad = ( pointerNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_LOAD, pointerNode, null, storeNode );
/**
* Stores a value in the atomic variable.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation, typically the previous value.
* @returns {AtomicFunctionNode}
*/
const atomicStore = ( pointerNode, valueNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_STORE, pointerNode, valueNode, storeNode );
/**
* Increments the value stored in the atomic variable.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation, typically the previous value.
* @returns {AtomicFunctionNode}
*/
const atomicAdd = ( pointerNode, valueNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_ADD, pointerNode, valueNode, storeNode );
/**
* Decrements the value stored in the atomic variable.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation, typically the previous value.
* @returns {AtomicFunctionNode}
*/
const atomicSub = ( pointerNode, valueNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_SUB, pointerNode, valueNode, storeNode );
/**
* Stores in an atomic variable the maximum between its current value and a parameter.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation, typically the previous value.
* @returns {AtomicFunctionNode}
*/
const atomicMax = ( pointerNode, valueNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_MAX, pointerNode, valueNode, storeNode );
/**
* Stores in an atomic variable the minimum between its current value and a parameter.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation, typically the previous value.
* @returns {AtomicFunctionNode}
*/
const atomicMin = ( pointerNode, valueNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_MIN, pointerNode, valueNode, storeNode );
/**
* Stores in an atomic variable the bitwise AND of its value with a parameter.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation, typically the previous value.
* @returns {AtomicFunctionNode}
*/
const atomicAnd = ( pointerNode, valueNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_AND, pointerNode, valueNode, storeNode );
/**
* Stores in an atomic variable the bitwise OR of its value with a parameter.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation, typically the previous value.
* @returns {AtomicFunctionNode}
*/
const atomicOr = ( pointerNode, valueNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_OR, pointerNode, valueNode, storeNode );
/**
* Stores in an atomic variable the bitwise XOR of its value with a parameter.
*
* @function
* @param {Node} pointerNode - An atomic variable or element of an atomic buffer.
* @param {Node} valueNode - The value that mutates the atomic variable.
* @param {Node?} [storeNode=null] - A variable storing the return value of the atomic operation, typically the previous value.
* @returns {AtomicFunctionNode}
*/
const atomicXor = ( pointerNode, valueNode, storeNode = null ) =>
atomicFunc( AtomicFunctionNode.ATOMIC_XOR, pointerNode, valueNode, storeNode );
/** @module Lights **/
// Lazily created cache mapping a light to its per-light node data.
let uniformsLib;
/**
* Returns the per-light data object for the given light, creating it on first access.
*
* @param {Light} light - The light to get the data for.
* @return {Object} The per-light data object.
*/
function getLightData( light ) {
if ( uniformsLib === undefined ) uniformsLib = new WeakMap();
if ( uniformsLib.has( light ) === false ) uniformsLib.set( light, {} );
return uniformsLib.get( light );
}
/**
* TSL function for getting a shadow matrix uniform node for the given light.
* The uniform is created once per light and cached.
*
* @function
* @param {Light} light - The light source.
* @returns {UniformNode<mat4>} The shadow matrix uniform node.
*/
function lightShadowMatrix( light ) {
const data = getLightData( light );
if ( data.shadowMatrix === undefined ) {
data.shadowMatrix = uniform( 'mat4' ).setGroup( renderGroup ).onRenderUpdate( () => {
// NOTE(review): when the light does not cast shadows, the matrices are
// presumably not refreshed elsewhere, so refresh them here — confirm.
if ( light.castShadow !== true ) {
light.shadow.updateMatrices( light );
}
return light.shadow.matrix;
} );
}
return data.shadowMatrix;
}
/**
* TSL function for getting projected uv coordinates for the given light.
* Relevant when using maps with spot lights. The node is created once
* per light and cached.
*
* @function
* @param {Light} light - The light source.
* @returns {Node<vec3>} The projected uvs.
*/
function lightProjectionUV( light ) {
const data = getLightData( light );
if ( data.projectionUV === undefined ) {
const coord = lightShadowMatrix( light ).mul( positionWorld );
data.projectionUV = coord.xyz.div( coord.w ); // perspective divide
}
return data.projectionUV;
}
/**
* TSL function for getting the position in world space for the given light.
* The uniform is created once per light and cached.
*
* @function
* @param {Light} light - The light source.
* @returns {UniformNode<vec3>} The light's position in world space.
*/
function lightPosition( light ) {
const data = getLightData( light );
if ( data.position === undefined ) {
data.position = uniform( new Vector3() ).setGroup( renderGroup ).onRenderUpdate(
( _, self ) => self.value.setFromMatrixPosition( light.matrixWorld )
);
}
return data.position;
}
/**
* TSL function for getting the light target position in world space for the given light.
* The uniform is created once per light and cached.
*
* @function
* @param {Light} light - The light source.
* @returns {UniformNode<vec3>} The light target position in world space.
*/
function lightTargetPosition( light ) {
const data = getLightData( light );
if ( data.targetPosition === undefined ) {
data.targetPosition = uniform( new Vector3() ).setGroup( renderGroup ).onRenderUpdate(
( _, self ) => self.value.setFromMatrixPosition( light.target.matrixWorld )
);
}
return data.targetPosition;
}
/**
* TSL function for getting the position in view space for the given light.
* The uniform is created once per light and cached.
*
* @function
* @param {Light} light - The light source.
* @returns {UniformNode<vec3>} The light's position in view space.
*/
function lightViewPosition( light ) {
const data = getLightData( light );
if ( data.viewPosition === undefined ) {
data.viewPosition = uniform( new Vector3() ).setGroup( renderGroup ).onRenderUpdate( ( { camera }, self ) => {
if ( self.value == null ) self.value = new Vector3(); // defensive: re-create if the value was cleared
self.value.setFromMatrixPosition( light.matrixWorld );
self.value.applyMatrix4( camera.matrixWorldInverse );
} );
}
return data.viewPosition;
}
/**
* TSL function for getting the light target direction for the given light,
* transformed by the camera view matrix.
*
* @function
* @param {Light} light - The light source.
* @returns {Node<vec3>} The light's target direction.
*/
const lightTargetDirection = ( light ) => {
const direction = lightPosition( light ).sub( lightTargetPosition( light ) );
return cameraViewMatrix.transformDirection( direction );
};
/** @module LightsNode **/
/**
* Sorts the given lights by their id in ascending order.
* NOTE: sorts in place and returns the same (mutated) array —
* callers rely on `_lights` ending up sorted.
*
* @param {Array<Light>} lights - The lights to sort.
* @return {Array<Light>} The sorted lights.
*/
const sortLights = ( lights ) => lights.sort( ( lightA, lightB ) => lightA.id - lightB.id );
/**
* Finds the analytic light node that belongs to the light with the given id.
*
* @param {Number} id - The id of the light.
* @param {Array<LightingNode>} lightNodes - The light nodes to search in.
* @return {LightingNode?} The matching light node, or `null` when not found.
*/
const getLightNodeById = ( id, lightNodes ) => {
for ( const candidate of lightNodes ) {
if ( candidate.isAnalyticLightNode && candidate.light.id === id ) return candidate;
}
return null;
};
// Module-level cache so light nodes can be shared for the same light object.
const _lightsNodeRef = /*@__PURE__*/ new WeakMap();
/**
* This node represents the scene's lighting and manages the lighting model's life cycle
* for the current build 3D object. It is responsible for computing the total outgoing
* light in a given lighting context.
*
* @augments Node
*/
class LightsNode extends Node {
static get type() {
return 'LightsNode';
}
/**
* Constructs a new lights node.
*/
constructor() {
super( 'vec3' );
/**
* A node representing the total diffuse light.
*
* @type {Node<vec3>}
*/
this.totalDiffuseNode = vec3().toVar( 'totalDiffuse' );
/**
* A node representing the total specular light.
*
* @type {Node<vec3>}
*/
this.totalSpecularNode = vec3().toVar( 'totalSpecular' );
/**
* A node representing the outgoing light.
*
* @type {Node<vec3>}
*/
this.outgoingLightNode = vec3().toVar( 'outgoingLight' );
/**
* An array representing the lights in the scene.
*
* @private
* @type {Array<Light>}
*/
this._lights = [];
/**
* For each light in the scene, this node will create a
* corresponding light node.
*
* @private
* @type {Array<LightingNode>?}
* @default null
*/
this._lightNodes = null;
/**
* A hash for identifying the current light nodes setup.
*
* @private
* @type {String?}
* @default null
*/
this._lightNodesHash = null;
/**
* `LightsNode` sets this property to `true` by default.
*
* @type {Boolean}
* @default true
*/
this.global = true;
}
/**
* Overwrites the default {@link Node#customCacheKey} implementation by including the
* light IDs into the cache key.
*
* @return {Number} The custom cache key.
*/
customCacheKey() {
const lightIDs = [];
const lights = this._lights;
for ( let i = 0; i < lights.length; i ++ ) {
lightIDs.push( lights[ i ].id );
}
return hashArray( lightIDs );
}
/**
* Computes a hash value for identifying the current light nodes setup.
*
* @param {NodeBuilder} builder - A reference to the current node builder.
* @return {String} The computed hash.
*/
getHash( builder ) {
if ( this._lightNodesHash === null ) {
if ( this._lightNodes === null ) this.setupLightsNode( builder );
const hash = [];
for ( const lightNode of this._lightNodes ) {
hash.push( lightNode.getSelf().getHash() );
}
this._lightNodesHash = 'lights-' + hash.join( ',' );
}
return this._lightNodesHash;
}
/**
* Builds all nodes that were collected on this node's stack during `setup()`.
*
* @param {NodeBuilder} builder - A reference to the current node builder.
*/
analyze( builder ) {
const properties = builder.getDataFromNode( this );
for ( const node of properties.nodes ) {
node.build( builder );
}
}
/**
* Creates lighting nodes for each scene light. This makes it possible to further
* process lights in the node system.
*
* @param {NodeBuilder} builder - A reference to the current node builder.
*/
setupLightsNode( builder ) {
const lightNodes = [];
const previousLightNodes = this._lightNodes;
const lights = sortLights( this._lights );
const nodeLibrary = builder.renderer.library;
for ( const light of lights ) {
if ( light.isNode ) {
lightNodes.push( nodeObject( light ) );
} else {
let lightNode = null;
if ( previousLightNodes !== null ) {
lightNode = getLightNodeById( light.id, previousLightNodes ); // reuse existing light node
}
if ( lightNode === null ) {
// find the corresponding node type for a given light
const lightNodeClass = nodeLibrary.getLightNodeClass( light.constructor );
if ( lightNodeClass === null ) {
console.warn( `LightsNode.setupNodeLights: Light node not found for ${ light.constructor.name }` );
continue;
}
// share light nodes for the same light via the module-level cache
if ( _lightsNodeRef.has( light ) ) {
lightNode = _lightsNodeRef.get( light );
} else {
lightNode = nodeObject( new lightNodeClass( light ) );
_lightsNodeRef.set( light, lightNode );
}
}
// fix: collect the light node on BOTH paths — previously a node reused from
// `previousLightNodes` was never pushed (the push sat inside the creation
// branch, behind a shadowing inner `let lightNode`), dropping the light.
lightNodes.push( lightNode );
}
}
this._lightNodes = lightNodes;
}
/**
* Setups the internal lights by building all respective
* light nodes.
*
* @param {NodeBuilder} builder - A reference to the current node builder.
* @param {Array<LightingNode>} lightNodes - An array of lighting nodes.
*/
setupLights( builder, lightNodes ) {
for ( const lightNode of lightNodes ) {
lightNode.build( builder );
}
}
/**
* The implementation makes sure that for each light in the scene
* there is a corresponding light node. By building the light nodes
* and evaluating the lighting model the outgoing light is computed.
*
* @param {NodeBuilder} builder - A reference to the current node builder.
* @return {Node<vec3>} A node representing the outgoing light.
*/
setup( builder ) {
if ( this._lightNodes === null ) this.setupLightsNode( builder );
const context = builder.context;
const lightingModel = context.lightingModel;
let outgoingLightNode = this.outgoingLightNode;
if ( lightingModel ) {
const { _lightNodes, totalDiffuseNode, totalSpecularNode } = this;
context.outgoingLight = outgoingLightNode;
const stack = builder.addStack();
// remember the stack's nodes so analyze() can rebuild them
const properties = builder.getDataFromNode( this );
properties.nodes = stack.nodes;
//
lightingModel.start( context, stack, builder );
// lights
this.setupLights( builder, _lightNodes );
//
lightingModel.indirect( context, stack, builder );
//
const { backdrop, backdropAlpha } = context;
const { directDiffuse, directSpecular, indirectDiffuse, indirectSpecular } = context.reflectedLight;
let totalDiffuse = directDiffuse.add( indirectDiffuse );
if ( backdrop !== null ) {
if ( backdropAlpha !== null ) {
totalDiffuse = vec3( backdropAlpha.mix( totalDiffuse, backdrop ) );
} else {
totalDiffuse = vec3( backdrop );
}
context.material.transparent = true;
}
totalDiffuseNode.assign( totalDiffuse );
totalSpecularNode.assign( directSpecular.add( indirectSpecular ) );
outgoingLightNode.assign( totalDiffuseNode.add( totalSpecularNode ) );
//
lightingModel.finish( context, stack, builder );
//
outgoingLightNode = outgoingLightNode.bypass( builder.removeStack() );
}
return outgoingLightNode;
}
/**
* Configures this node with an array of lights.
*
* @param {Array<Light>} lights - An array of lights.
* @return {LightsNode} A reference to this node.
*/
setLights( lights ) {
this._lights = lights;
this._lightNodes = null;
this._lightNodesHash = null;
return this;
}
/**
* Returns an array of the scene's lights.
*
* @return {Array<Light>} The scene's lights.
*/
getLights() {
return this._lights;
}
/**
* Whether the scene has lights or not.
*
* @type {Boolean}
*/
get hasLights() {
return this._lights.length > 0;
}
}
/**
* TSL function for creating an instance of `LightsNode` and configuring
* it with the given array of lights.
*
* @function
* @param {Array<Light>} lights - An array of lights.
* @return {LightsNode} The created lights node.
*/
const lights = ( lights = [] ) => {
const lightsNode = nodeObject( new LightsNode() );
return lightsNode.setLights( lights );
};
/** @module ShadowBaseNode **/
/**
* Base class for all shadow nodes.
*
* Shadow nodes encapsulate shadow related logic and are always coupled to lighting nodes.
* Lighting nodes might share the same shadow node type or use specific ones depending on
* their requirements.
*
* @augments Node
*/
class ShadowBaseNode extends Node {
static get type() {
return 'ShadowBaseNode';
}
/**
* Constructs a new shadow base node.
*
* @param {Light} light - The shadow casting light.
*/
constructor( light ) {
super();
/**
* The shadow casting light.
*
* @type {Light}
*/
this.light = light;
/**
* Shadows are updated once per render by default.
*
* @type {String}
* @default 'render'
*/
this.updateBeforeType = NodeUpdateType.RENDER;
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isShadowBaseNode = true;
}
/**
* Setups the shadow position node which is by default the predefined TSL node object `shadowPositionWorld`.
*
* @param {(NodeBuilder|{Material})} object - A configuration object that must at least hold a material reference.
*/
setupShadowPosition( object ) {
const { material } = object;
// Use assign inside an Fn()
shadowPositionWorld.assign( material.shadowPositionNode || positionWorld );
}
/**
* Can be called when the shadow isn't required anymore. That can happen when
* a lighting node stops casting shadows by setting {@link Object3D#castShadow}
* to `false`.
*/
dispose() {
this.updateBeforeType = NodeUpdateType.NONE;
}
}
/**
* TSL object that represents the vertex position in world space during the shadow pass.
*
* Shared mutable variable node: `ShadowBaseNode#setupShadowPosition` assigns the
* effective shadow position into it.
*
* @type {Node<vec3>}
*/
const shadowPositionWorld = /*@__PURE__*/ vec3().toVar( 'shadowPositionWorld' );
/** @module RendererUtils **/
/**
* Saves the state of the given renderer and stores it into the given state object.
*
* If no state object is provided, the function creates one.
*
* @function
* @param {Renderer} renderer - The renderer.
* @param {Object} [state={}] - The state.
* @return {Object} The state.
*/
function saveRendererState( renderer, state = {} ) {
Object.assign( state, {
toneMapping: renderer.toneMapping,
toneMappingExposure: renderer.toneMappingExposure,
outputColorSpace: renderer.outputColorSpace,
renderTarget: renderer.getRenderTarget(),
activeCubeFace: renderer.getActiveCubeFace(),
activeMipmapLevel: renderer.getActiveMipmapLevel(),
renderObjectFunction: renderer.getRenderObjectFunction(),
pixelRatio: renderer.getPixelRatio(),
mrt: renderer.getMRT(),
// reuse the color object from a previous save when available
clearColor: renderer.getClearColor( state.clearColor || new Color() ),
clearAlpha: renderer.getClearAlpha(),
autoClear: renderer.autoClear,
scissorTest: renderer.getScissorTest()
} );
return state;
}
/**
* Saves the state of the given renderer and stores it into the given state object.
* Besides, the function also resets the state of the renderer to its default values.
*
* If no state object is provided, the function creates one.
*
* @function
* @param {Renderer} renderer - The renderer.
* @param {Object} [state={}] - The state.
* @return {Object} The state.
*/
function resetRendererState( renderer, state ) {
const saved = saveRendererState( renderer, state );
// restore renderer defaults
renderer.setMRT( null );
renderer.setRenderObjectFunction( null );
renderer.setClearColor( 0x000000, 1 );
renderer.autoClear = true;
return saved;
}
/**
* Restores the state of the given renderer from the given state object.
*
* @function
* @param {Renderer} renderer - The renderer.
* @param {Object} state - The state to restore.
*/
function restoreRendererState( renderer, state ) {
const {
toneMapping, toneMappingExposure, outputColorSpace,
renderTarget, activeCubeFace, activeMipmapLevel,
renderObjectFunction, pixelRatio, mrt,
clearColor, clearAlpha, autoClear, scissorTest
} = state;
renderer.toneMapping = toneMapping;
renderer.toneMappingExposure = toneMappingExposure;
renderer.outputColorSpace = outputColorSpace;
renderer.setRenderTarget( renderTarget, activeCubeFace, activeMipmapLevel );
renderer.setRenderObjectFunction( renderObjectFunction );
renderer.setPixelRatio( pixelRatio );
renderer.setMRT( mrt );
renderer.setClearColor( clearColor, clearAlpha );
renderer.autoClear = autoClear;
renderer.setScissorTest( scissorTest );
}
/**
* Saves the state of the given scene and stores it into the given state object.
*
* If no state object is provided, the function creates one.
*
* @function
* @param {Scene} scene - The scene.
* @param {Object} [state={}] - The state.
* @return {Object} The state.
*/
function saveSceneState( scene, state = {} ) {
const { background, backgroundNode, overrideMaterial } = scene;
state.background = background;
state.backgroundNode = backgroundNode;
state.overrideMaterial = overrideMaterial;
return state;
}
/**
* Saves the state of the given scene and stores it into the given state object.
* Besides, the function also resets the state of the scene to its default values.
*
* If no state object is provided, the function creates one.
*
* @function
* @param {Scene} scene - The scene.
* @param {Object} [state={}] - The state.
* @return {Object} The state.
*/
function resetSceneState( scene, state ) {
const saved = saveSceneState( scene, state );
// restore scene defaults
scene.background = null;
scene.backgroundNode = null;
scene.overrideMaterial = null;
return saved;
}
/**
* Restores the state of the given scene from the given state object.
*
* @function
* @param {Scene} scene - The scene.
* @param {Object} state - The state to restore.
*/
function restoreSceneState( scene, state ) {
const { background, backgroundNode, overrideMaterial } = state;
scene.background = background;
scene.backgroundNode = backgroundNode;
scene.overrideMaterial = overrideMaterial;
}
/**
* Saves the state of the given renderer and scene and stores it into the given state object.
*
* If no state object is provided, the function creates one.
*
* @function
* @param {Renderer} renderer - The renderer.
* @param {Scene} scene - The scene.
* @param {Object} [state={}] - The state.
* @return {Object} The state.
*/
function saveRendererAndSceneState( renderer, scene, state = {} ) {
// renderer state is captured first, then the scene state is merged in
return saveSceneState( scene, saveRendererState( renderer, state ) );
}
/**
* Saves the state of the given renderer and scene and stores it into the given state object.
* Besides, the function also resets the state of the renderer and scene to its default values.
*
* If no state object is provided, the function creates one.
*
* @function
* @param {Renderer} renderer - The renderer.
* @param {Scene} scene - The scene.
* @param {Object} [state={}] - The state.
* @return {Object} The state.
*/
function resetRendererAndSceneState( renderer, scene, state ) {
// renderer state is reset first, then the scene state
return resetSceneState( scene, resetRendererState( renderer, state ) );
}
/**
* Restores the state of the given renderer and scene from the given state object.
*
* @function
* @param {Renderer} renderer - The renderer.
* @param {Scene} scene - The scene.
* @param {Object} state - The state to restore.
*/
function restoreRendererAndSceneState( renderer, scene, state ) {
// restore in the same order the state was captured: renderer first, then scene
restoreRendererState( renderer, state );
restoreSceneState( scene, state );
}
// Aggregated namespace object for the renderer-utils helpers above (presumably
// emitted by the bundler from a `RendererUtils` module); frozen so consumers
// cannot mutate the shared namespace.
var RendererUtils = /*#__PURE__*/Object.freeze({
__proto__: null,
resetRendererAndSceneState: resetRendererAndSceneState,
resetRendererState: resetRendererState,
resetSceneState: resetSceneState,
restoreRendererAndSceneState: restoreRendererAndSceneState,
restoreRendererState: restoreRendererState,
restoreSceneState: restoreSceneState,
saveRendererAndSceneState: saveRendererAndSceneState,
saveRendererState: saveRendererState,
saveSceneState: saveSceneState
});
/** @module ShadowNode **/
// Caches the shadow-pass override material per light.
const shadowMaterialLib = /*@__PURE__*/ new WeakMap();
/**
* Computes the distance from the world vertex position to the given reference
* position, remapped linearly into the `[ near, far ]` range and clamped to `[ 0, 1 ]`.
*
* @param {Node<vec3>} position - The reference position.
* @param {Node<float>} cameraNear - The near distance.
* @param {Node<float>} cameraFar - The far distance.
* @return {Node<float>} The normalized linear distance.
*/
const linearDistance = /*@__PURE__*/ Fn( ( [ position, cameraNear, cameraFar ] ) => {
const worldDistance = positionWorld.sub( position ).length();
return worldDistance.sub( cameraNear ).div( cameraFar.sub( cameraNear ) ).saturate(); // clamp to [ 0, 1 ]
} );
/**
* Returns a node that computes the linear distance from the given light,
* normalized by its shadow camera's near/far range.
*
* @param {Light} light - The light source.
* @return {Node<float>} The normalized linear shadow distance.
*/
const linearShadowDistance = ( light ) => {
const shadowCamera = light.shadow.camera;
const near = reference( 'near', 'float', shadowCamera ).setGroup( renderGroup );
const far = reference( 'far', 'float', shadowCamera ).setGroup( renderGroup );
return linearDistance( objectPosition( light ), near, far );
};
/**
* Returns the cached shadow-pass material for the given light, creating it on demand.
*
* @param {Light} light - The shadow casting light.
* @return {NodeMaterial} The shadow pass material.
*/
const getShadowMaterial = ( light ) => {
let material = shadowMaterialLib.get( light );
if ( material === undefined ) {
// Point lights write a linear distance into the shadow map instead of projected depth.
const depthNode = light.isPointLight ? linearShadowDistance( light ) : null;
material = new NodeMaterial();
material.colorNode = vec4( 0, 0, 0, 1 );
material.depthNode = depthNode;
material.isShadowPassMaterial = true; // Use to avoid other overrideMaterial override material.colorNode unintentionally when using material.shadowNode
material.name = 'ShadowMaterial';
material.fog = false;
shadowMaterialLib.set( light, material );
}
return material;
};
/**
* A shadow filtering function performing basic filtering. This is in fact an unfiltered version of the shadow map
* with a binary `[0,1]` result.
*
* @method
* @param {Object} inputs - The input parameter object.
* @param {DepthTexture} inputs.depthTexture - A reference to the shadow map's texture data.
* @param {Node<vec3>} inputs.shadowCoord - The shadow coordinates.
* @return {Node<float>} The filtering result.
*/
const BasicShadowFilter = /*@__PURE__*/ Fn( ( { depthTexture, shadowCoord } ) => {
const shadowSample = texture( depthTexture, shadowCoord.xy );
return shadowSample.compare( shadowCoord.z ); // single hardware depth comparison
} );
/**
* A shadow filtering function performing PCF filtering.
*
* Averages 17 hardware depth comparisons: the center tap plus taps at half and
* full filter radius around the shadow coordinate.
*
* @method
* @param {Object} inputs - The input parameter object.
* @param {DepthTexture} inputs.depthTexture - A reference to the shadow map's texture data.
* @param {Node<vec3>} inputs.shadowCoord - The shadow coordinates.
* @param {LightShadow} inputs.shadow - The light shadow.
* @return {Node<float>} The filtering result.
*/
const PCFShadowFilter = /*@__PURE__*/ Fn( ( { depthTexture, shadowCoord, shadow } ) => {
// hardware depth comparison: 1 when the reference depth passes, 0 otherwise
const depthCompare = ( uv, compare ) => texture( depthTexture, uv ).compare( compare );
const mapSize = reference( 'mapSize', 'vec2', shadow ).setGroup( renderGroup );
const radius = reference( 'radius', 'float', shadow ).setGroup( renderGroup );
const texelSize = vec2( 1 ).div( mapSize );
// offsets at the full filter radius (negative/positive x and y) ...
const dx0 = texelSize.x.negate().mul( radius );
const dy0 = texelSize.y.negate().mul( radius );
const dx1 = texelSize.x.mul( radius );
const dy1 = texelSize.y.mul( radius );
// ... and at half the filter radius
const dx2 = dx0.div( 2 );
const dy2 = dy0.div( 2 );
const dx3 = dx1.div( 2 );
const dy3 = dy1.div( 2 );
// 17 equally weighted taps
return add(
depthCompare( shadowCoord.xy.add( vec2( dx0, dy0 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( 0, dy0 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx1, dy0 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx2, dy2 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( 0, dy2 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx3, dy2 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx0, 0 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx2, 0 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy, shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx3, 0 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx1, 0 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx2, dy3 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( 0, dy3 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx3, dy3 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx0, dy1 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( 0, dy1 ) ), shadowCoord.z ),
depthCompare( shadowCoord.xy.add( vec2( dx1, dy1 ) ), shadowCoord.z )
).mul( 1 / 17 );
} );
/**
* A shadow filtering function performing PCF soft filtering.
*
* Snaps the lookup to the texel grid and blends neighboring comparisons with
* the sub-texel fraction, yielding a 9-tap bilinearly weighted result.
*
* @method
* @param {Object} inputs - The input parameter object.
* @param {DepthTexture} inputs.depthTexture - A reference to the shadow map's texture data.
* @param {Node<vec3>} inputs.shadowCoord - The shadow coordinates.
* @param {LightShadow} inputs.shadow - The light shadow.
* @return {Node<float>} The filtering result.
*/
const PCFSoftShadowFilter = /*@__PURE__*/ Fn( ( { depthTexture, shadowCoord, shadow } ) => {
// hardware depth comparison: 1 when the reference depth passes, 0 otherwise
const depthCompare = ( uv, compare ) => texture( depthTexture, uv ).compare( compare );
const mapSize = reference( 'mapSize', 'vec2', shadow ).setGroup( renderGroup );
const texelSize = vec2( 1 ).div( mapSize );
const dx = texelSize.x;
const dy = texelSize.y;
const uv = shadowCoord.xy;
// sub-texel fraction, used below as the bilinear blend weight
const f = fract( uv.mul( mapSize ).add( 0.5 ) );
// snap uv onto the texel grid (mutates the uv node)
uv.subAssign( f.mul( texelSize ) );
return add(
depthCompare( uv, shadowCoord.z ),
depthCompare( uv.add( vec2( dx, 0 ) ), shadowCoord.z ),
depthCompare( uv.add( vec2( 0, dy ) ), shadowCoord.z ),
depthCompare( uv.add( texelSize ), shadowCoord.z ),
mix(
depthCompare( uv.add( vec2( dx.negate(), 0 ) ), shadowCoord.z ),
depthCompare( uv.add( vec2( dx.mul( 2 ), 0 ) ), shadowCoord.z ),
f.x
),
mix(
depthCompare( uv.add( vec2( dx.negate(), dy ) ), shadowCoord.z ),
depthCompare( uv.add( vec2( dx.mul( 2 ), dy ) ), shadowCoord.z ),
f.x
),
mix(
depthCompare( uv.add( vec2( 0, dy.negate() ) ), shadowCoord.z ),
depthCompare( uv.add( vec2( 0, dy.mul( 2 ) ) ), shadowCoord.z ),
f.y
),
mix(
depthCompare( uv.add( vec2( dx, dy.negate() ) ), shadowCoord.z ),
depthCompare( uv.add( vec2( dx, dy.mul( 2 ) ) ), shadowCoord.z ),
f.y
),
mix(
mix(
depthCompare( uv.add( vec2( dx.negate(), dy.negate() ) ), shadowCoord.z ),
depthCompare( uv.add( vec2( dx.mul( 2 ), dy.negate() ) ), shadowCoord.z ),
f.x
),
mix(
depthCompare( uv.add( vec2( dx.negate(), dy.mul( 2 ) ) ), shadowCoord.z ),
depthCompare( uv.add( vec2( dx.mul( 2 ), dy.mul( 2 ) ) ), shadowCoord.z ),
f.x
),
f.y
)
).mul( 1 / 9 );
} );
/**
* A shadow filtering function performing VSM filtering.
*
* Reads the pre-blurred (mean, std-dev) moments from the shadow map and, for
* fragments not fully lit, estimates the light probability via Chebyshev's
* inequality.
*
* @method
* @param {Object} inputs - The input parameter object.
* @param {DepthTexture} inputs.depthTexture - A reference to the shadow map's texture data.
* @param {Node<vec3>} inputs.shadowCoord - The shadow coordinates.
* @return {Node<float>} The filtering result.
*/
const VSMShadowFilter = /*@__PURE__*/ Fn( ( { depthTexture, shadowCoord } ) => {
const occlusion = float( 1 ).toVar();
// rg = (mean depth, std-dev) produced by the two VSM blur passes
const distribution = texture( depthTexture ).sample( shadowCoord.xy ).rg;
const hardShadow = step( shadowCoord.z, distribution.x );
// only fragments that fail the hard test need the soft estimate
If( hardShadow.notEqual( float( 1.0 ) ), () => {
const distance = shadowCoord.z.sub( distribution.x );
const variance = max$1( 0, distribution.y.mul( distribution.y ) );
let softnessProbability = variance.div( variance.add( distance.mul( distance ) ) ); // Chebyshev's inequality
// remap [0.3, 0.95] -> [0, 1] to reduce light bleeding
softnessProbability = clamp( sub( softnessProbability, 0.3 ).div( 0.95 - 0.3 ) );
occlusion.assign( clamp( max$1( hardShadow, softnessProbability ) ) );
} );
return occlusion;
} );
/**
* Represents the shader code for the first VSM render pass.
*
* Blurs vertically: accumulates depth mean and squared mean over `samples`
* taps spread across `[-1, 1] * radius` in y, and outputs (mean, std-dev).
*
* @method
* @param {Object} inputs - The input parameter object.
* @param {Node<float>} inputs.samples - The number of samples
* @param {Node<float>} inputs.radius - The radius.
* @param {Node<float>} inputs.size - The size.
* @param {TextureNode} inputs.shadowPass - A reference to the render target's depth data.
* @return {Node<vec2>} The VSM output.
*/
const VSMPassVertical = /*@__PURE__*/ Fn( ( { samples, radius, size, shadowPass } ) => {
const mean = float( 0 ).toVar();
const squaredMean = float( 0 ).toVar();
// distribute `samples` taps evenly over [-1, 1]; degenerate single-sample case taps the center
const uvStride = samples.lessThanEqual( float( 1 ) ).select( float( 0 ), float( 2 ).div( samples.sub( 1 ) ) );
const uvStart = samples.lessThanEqual( float( 1 ) ).select( float( 0 ), float( - 1 ) );
Loop( { start: int( 0 ), end: int( samples ), type: 'int', condition: '<' }, ( { i } ) => {
const uvOffset = uvStart.add( float( i ).mul( uvStride ) );
const depth = shadowPass.sample( add( screenCoordinate.xy, vec2( 0, uvOffset ).mul( radius ) ).div( size ) ).x;
mean.addAssign( depth );
squaredMean.addAssign( depth.mul( depth ) );
} );
mean.divAssign( samples );
squaredMean.divAssign( samples );
// std-dev = sqrt(E[x^2] - E[x]^2)
const std_dev = sqrt( squaredMean.sub( mean.mul( mean ) ) );
return vec2( mean, std_dev );
} );
/**
* Represents the shader code for the second VSM render pass.
*
* Blurs horizontally over the (mean, std-dev) output of the vertical pass and
* outputs the combined (mean, std-dev) moments.
*
* @method
* @param {Object} inputs - The input parameter object.
* @param {Node<float>} inputs.samples - The number of samples
* @param {Node<float>} inputs.radius - The radius.
* @param {Node<float>} inputs.size - The size.
* @param {TextureNode} inputs.shadowPass - The result of the first VSM render pass.
* @return {Node<vec2>} The VSM output.
*/
const VSMPassHorizontal = /*@__PURE__*/ Fn( ( { samples, radius, size, shadowPass } ) => {
const mean = float( 0 ).toVar();
const squaredMean = float( 0 ).toVar();
// distribute `samples` taps evenly over [-1, 1]; degenerate single-sample case taps the center
const uvStride = samples.lessThanEqual( float( 1 ) ).select( float( 0 ), float( 2 ).div( samples.sub( 1 ) ) );
const uvStart = samples.lessThanEqual( float( 1 ) ).select( float( 0 ), float( - 1 ) );
Loop( { start: int( 0 ), end: int( samples ), type: 'int', condition: '<' }, ( { i } ) => {
const uvOffset = uvStart.add( float( i ).mul( uvStride ) );
const distribution = shadowPass.sample( add( screenCoordinate.xy, vec2( uvOffset, 0 ).mul( radius ) ).div( size ) );
mean.addAssign( distribution.x );
// combine the per-tap variance (y^2) with the squared mean (x^2)
squaredMean.addAssign( add( distribution.y.mul( distribution.y ), distribution.x.mul( distribution.x ) ) );
} );
mean.divAssign( samples );
squaredMean.divAssign( samples );
// std-dev = sqrt(E[x^2] - E[x]^2)
const std_dev = sqrt( squaredMean.sub( mean.mul( mean ) ) );
return vec2( mean, std_dev );
} );
// Indexed by the renderer's shadow map type constant (BasicShadowMap, PCFShadowMap,
// PCFSoftShadowMap, VSMShadowMap) — see ShadowNode#getShadowFilterFn.
const _shadowFilterLib = [ BasicShadowFilter, PCFShadowFilter, PCFSoftShadowFilter, VSMShadowFilter ];
//
// Module-scoped scratch state shared by all shadow nodes; shadow maps are rendered
// sequentially so a single cached state object and quad mesh are sufficient.
let _rendererState;
const _quadMesh = /*@__PURE__*/ new QuadMesh();
/**
* Represents the default shadow implementation for lighting nodes.
*
* @augments module:ShadowBaseNode~ShadowBaseNode
*/
class ShadowNode extends ShadowBaseNode {
	static get type() {
		return 'ShadowNode';
	}
	/**
	* Constructs a new shadow node.
	*
	* @param {Light} light - The shadow casting light.
	* @param {LightShadow?} [shadow=null] - An optional light shadow.
	*/
	constructor( light, shadow = null ) {
		super( light );
		/**
		* The light shadow which defines the properties light's
		* shadow. Falls back to the light's own shadow when none is given.
		*
		* @type {LightShadow?}
		* @default null
		*/
		this.shadow = shadow || light.shadow;
		/**
		* A reference to the shadow map which is a render target.
		* Created lazily in {@link ShadowNode#setupShadow}.
		*
		* @type {RenderTarget?}
		* @default null
		*/
		this.shadowMap = null;
		/**
		* Only relevant for VSM shadows. Render target for the
		* first VSM render pass.
		*
		* @type {RenderTarget?}
		* @default null
		*/
		this.vsmShadowMapVertical = null;
		/**
		* Only relevant for VSM shadows. Render target for the
		* second VSM render pass.
		*
		* @type {RenderTarget?}
		* @default null
		*/
		this.vsmShadowMapHorizontal = null;
		/**
		* Only relevant for VSM shadows. Node material which
		* is used to render the first VSM pass.
		*
		* @type {NodeMaterial?}
		* @default null
		*/
		this.vsmMaterialVertical = null;
		/**
		* Only relevant for VSM shadows. Node material which
		* is used to render the second VSM pass.
		*
		* @type {NodeMaterial?}
		* @default null
		*/
		this.vsmMaterialHorizontal = null;
		/**
		* A reference to the output node which defines the
		* final result of this shadow node. Cached so the shadow
		* setup runs only once per node.
		*
		* @type {Node?}
		* @private
		* @default null
		*/
		this._node = null;
		/**
		* This flag can be used for type testing.
		*
		* @type {Boolean}
		* @readonly
		* @default true
		*/
		this.isShadowNode = true;
	}
	/**
	* Setups the shadow filtering.
	*
	* @param {NodeBuilder} builder - A reference to the current node builder.
	* @param {Object} inputs - A configuration object that defines the shadow filtering.
	* @param {Function} inputs.filterFn - This function defines the filtering type of the shadow map e.g. PCF.
	* @param {DepthTexture} inputs.depthTexture - A reference to the shadow map's texture data.
	* @param {Node<vec3>} inputs.shadowCoord - Shadow coordinates which are used to sample from the shadow map.
	* @param {LightShadow} inputs.shadow - The light shadow.
	* @return {Node<float>} The result node of the shadow filtering.
	*/
	setupShadowFilter( builder, { filterFn, depthTexture, shadowCoord, shadow } ) {
		// Fragments outside the shadow camera frustum (including depth > 1) are fully lit.
		const frustumTest = shadowCoord.x.greaterThanEqual( 0 )
			.and( shadowCoord.x.lessThanEqual( 1 ) )
			.and( shadowCoord.y.greaterThanEqual( 0 ) )
			.and( shadowCoord.y.lessThanEqual( 1 ) )
			.and( shadowCoord.z.lessThanEqual( 1 ) );
		const shadowNode = filterFn( { depthTexture, shadowCoord, shadow } );
		return frustumTest.select( shadowNode, float( 1 ) );
	}
	/**
	* Setups the shadow coordinates.
	*
	* @param {NodeBuilder} builder - A reference to the current node builder.
	* @param {Node<vec3>} shadowPosition - A node representing the shadow position.
	* @return {Node<vec3>} The shadow coordinates.
	*/
	setupShadowCoord( builder, shadowPosition ) {
		const { shadow } = this;
		const { renderer } = builder;
		const bias = reference( 'bias', 'float', shadow ).setGroup( renderGroup );
		let shadowCoord = shadowPosition;
		let coordZ;
		if ( shadow.camera.isOrthographicCamera || renderer.logarithmicDepthBuffer !== true ) {
			// Standard perspective divide; depth comes straight from the projected z.
			shadowCoord = shadowCoord.xyz.div( shadowCoord.w );
			coordZ = shadowCoord.z;
			if ( renderer.coordinateSystem === WebGPUCoordinateSystem ) {
				coordZ = coordZ.mul( 2 ).sub( 1 ); // WebGPU: Conversion [ 0, 1 ] to [ - 1, 1 ]
			}
		} else {
			const w = shadowCoord.w;
			shadowCoord = shadowCoord.xy.div( w ); // <-- Only divide X/Y coords since we don't need Z
			// The normally available "cameraNear" and "cameraFar" nodes cannot be used here because they do not get
			// updated to use the shadow camera. So, we have to declare our own "local" ones here.
			// TODO: How do we get the cameraNear/cameraFar nodes to use the shadow camera so we don't have to declare local ones here?
			const cameraNearLocal = reference( 'near', 'float', shadow.camera ).setGroup( renderGroup );
			const cameraFarLocal = reference( 'far', 'float', shadow.camera ).setGroup( renderGroup );
			coordZ = viewZToLogarithmicDepth( w.negate(), cameraNearLocal, cameraFarLocal );
		}
		shadowCoord = vec3(
			shadowCoord.x,
			shadowCoord.y.oneMinus(), // follow webgpu standards
			coordZ.add( bias )
		);
		return shadowCoord;
	}
	/**
	* Returns the shadow filtering function for the given shadow type.
	*
	* @param {Number} type - The shadow type.
	* @return {Function} The filtering function.
	*/
	getShadowFilterFn( type ) {
		return _shadowFilterLib[ type ];
	}
	/**
	* Setups the shadow output node. Creates the shadow render target and, for
	* VSM, the additional blur targets/materials, then wires the filtering.
	*
	* @param {NodeBuilder} builder - A reference to the current node builder.
	* @return {Node<vec3>} The shadow output node.
	*/
	setupShadow( builder ) {
		const { renderer } = builder;
		const { light, shadow } = this;
		const shadowMapType = renderer.shadowMap.type;
		const depthTexture = new DepthTexture( shadow.mapSize.width, shadow.mapSize.height );
		depthTexture.compareFunction = LessCompare;
		const shadowMap = builder.createRenderTarget( shadow.mapSize.width, shadow.mapSize.height );
		shadowMap.depthTexture = depthTexture;
		shadow.camera.updateProjectionMatrix();
		// VSM
		if ( shadowMapType === VSMShadowMap ) {
			depthTexture.compareFunction = null; // VSM does not use textureSampleCompare()/texture2DCompare()
			// Two RG half-float targets (mean, std dev) for the separable blur.
			this.vsmShadowMapVertical = builder.createRenderTarget( shadow.mapSize.width, shadow.mapSize.height, { format: RGFormat, type: HalfFloatType } );
			this.vsmShadowMapHorizontal = builder.createRenderTarget( shadow.mapSize.width, shadow.mapSize.height, { format: RGFormat, type: HalfFloatType } );
			const shadowPassVertical = texture( depthTexture );
			const shadowPassHorizontal = texture( this.vsmShadowMapVertical.texture );
			const samples = reference( 'blurSamples', 'float', shadow ).setGroup( renderGroup );
			const radius = reference( 'radius', 'float', shadow ).setGroup( renderGroup );
			const size = reference( 'mapSize', 'vec2', shadow ).setGroup( renderGroup );
			// Materials are created once and their fragment nodes refreshed on subsequent setups.
			let material = this.vsmMaterialVertical || ( this.vsmMaterialVertical = new NodeMaterial() );
			material.fragmentNode = VSMPassVertical( { samples, radius, size, shadowPass: shadowPassVertical } ).context( builder.getSharedContext() );
			material.name = 'VSMVertical';
			material = this.vsmMaterialHorizontal || ( this.vsmMaterialHorizontal = new NodeMaterial() );
			material.fragmentNode = VSMPassHorizontal( { samples, radius, size, shadowPass: shadowPassHorizontal } ).context( builder.getSharedContext() );
			material.name = 'VSMHorizontal';
		}
		//
		const shadowIntensity = reference( 'intensity', 'float', shadow ).setGroup( renderGroup );
		const normalBias = reference( 'normalBias', 'float', shadow ).setGroup( renderGroup );
		// Offset the world position along the surface normal to reduce shadow acne.
		const shadowPosition = lightShadowMatrix( light ).mul( shadowPositionWorld.add( transformedNormalWorld.mul( normalBias ) ) );
		const shadowCoord = this.setupShadowCoord( builder, shadowPosition );
		//
		const filterFn = shadow.filterNode || this.getShadowFilterFn( renderer.shadowMap.type ) || null;
		if ( filterFn === null ) {
			throw new Error( 'THREE.WebGPURenderer: Shadow map type not supported yet.' );
		}
		// VSM samples the blurred distribution; all other types sample the raw depth.
		const shadowDepthTexture = ( shadowMapType === VSMShadowMap ) ? this.vsmShadowMapHorizontal.texture : depthTexture;
		const shadowNode = this.setupShadowFilter( builder, { filterFn, shadowTexture: shadowMap.texture, depthTexture: shadowDepthTexture, shadowCoord, shadow } );
		const shadowColor = texture( shadowMap.texture, shadowCoord );
		const shadowOutput = mix( 1, shadowNode.rgb.mix( shadowColor, 1 ), shadowIntensity.mul( shadowColor.a ) ).toVar();
		this.shadowMap = shadowMap;
		this.shadow.map = shadowMap;
		return shadowOutput;
	}
	/**
	* The implementation performs the setup of the output node. An output is only
	* produces if shadow mapping is globally enabled in the renderer.
	*
	* @param {NodeBuilder} builder - A reference to the current node builder.
	* @return {ShaderCallNodeInternal} The output node.
	*/
	setup( builder ) {
		if ( builder.renderer.shadowMap.enabled === false ) return;
		return Fn( () => {
			let node = this._node;
			this.setupShadowPosition( builder );
			// Build the shadow graph only once; subsequent setups reuse the cached node.
			if ( node === null ) {
				this._node = node = this.setupShadow( builder );
			}
			if ( builder.material.shadowNode ) { // @deprecated, r171
				console.warn( 'THREE.NodeMaterial: ".shadowNode" is deprecated. Use ".castShadowNode" instead.' );
			}
			if ( builder.material.receivedShadowNode ) {
				node = builder.material.receivedShadowNode( node );
			}
			return node;
		} )();
	}
	/**
	* Renders the shadow. The logic of this function could be included
	* into {@link ShadowNode#updateShadow} however more specialized shadow
	* nodes might require a custom shadow map rendering. By having a
	* dedicated method, it's easier to overwrite the default behavior.
	*
	* @param {NodeFrame} frame - A reference to the current node frame.
	*/
	renderShadow( frame ) {
		const { shadow, shadowMap, light } = this;
		const { renderer, scene } = frame;
		shadow.updateMatrices( light );
		shadowMap.setSize( shadow.mapSize.width, shadow.mapSize.height );
		renderer.render( scene, shadow.camera );
	}
	/**
	* Updates the shadow. Temporarily overrides the scene material and the
	* renderer's render-object function, renders the shadow map (plus the VSM
	* blur passes if required) and restores the previous renderer/scene state.
	*
	* @param {NodeFrame} frame - A reference to the current node frame.
	*/
	updateShadow( frame ) {
		const { shadowMap, light, shadow } = this;
		const { renderer, scene, camera } = frame;
		const shadowType = renderer.shadowMap.type;
		// Remember the depth texture version so updateBefore() can detect a completed render.
		const depthVersion = shadowMap.depthTexture.version;
		this._depthVersionCached = depthVersion;
		shadow.camera.layers.mask = camera.layers.mask;
		const currentRenderObjectFunction = renderer.getRenderObjectFunction();
		const currentMRT = renderer.getMRT();
		const useVelocity = currentMRT ? currentMRT.has( 'velocity' ) : false;
		_rendererState = resetRendererAndSceneState( renderer, scene, _rendererState );
		scene.overrideMaterial = getShadowMaterial( light );
		renderer.setRenderObjectFunction( ( object, scene, _camera, geometry, material, group, ...params ) => {
			// VSM also needs shadow *receivers* in the map so their blurred occlusion is correct.
			if ( object.castShadow === true || ( object.receiveShadow && shadowType === VSMShadowMap ) ) {
				if ( useVelocity ) {
					getDataFromObject( object ).useVelocity = true;
				}
				object.onBeforeShadow( renderer, object, camera, shadow.camera, geometry, scene.overrideMaterial, group );
				renderer.renderObject( object, scene, _camera, geometry, material, group, ...params );
				object.onAfterShadow( renderer, object, camera, shadow.camera, geometry, scene.overrideMaterial, group );
			}
		} );
		renderer.setRenderTarget( shadowMap );
		this.renderShadow( frame );
		renderer.setRenderObjectFunction( currentRenderObjectFunction );
		// vsm blur pass
		if ( light.isPointLight !== true && shadowType === VSMShadowMap ) {
			this.vsmPass( renderer );
		}
		restoreRendererAndSceneState( renderer, scene, _rendererState );
	}
	/**
	* For VSM additional render passes are required. Runs the separable
	* vertical then horizontal blur over the shadow depth data.
	*
	* @param {Renderer} renderer - A reference to the current renderer.
	*/
	vsmPass( renderer ) {
		const { shadow } = this;
		this.vsmShadowMapVertical.setSize( shadow.mapSize.width, shadow.mapSize.height );
		this.vsmShadowMapHorizontal.setSize( shadow.mapSize.width, shadow.mapSize.height );
		renderer.setRenderTarget( this.vsmShadowMapVertical );
		_quadMesh.material = this.vsmMaterialVertical;
		_quadMesh.render( renderer );
		renderer.setRenderTarget( this.vsmShadowMapHorizontal );
		_quadMesh.material = this.vsmMaterialHorizontal;
		_quadMesh.render( renderer );
	}
	/**
	* Frees the internal resources of this shadow node.
	*/
	dispose() {
		this.shadowMap.dispose();
		this.shadowMap = null;
		if ( this.vsmShadowMapVertical !== null ) {
			this.vsmShadowMapVertical.dispose();
			this.vsmShadowMapVertical = null;
			this.vsmMaterialVertical.dispose();
			this.vsmMaterialVertical = null;
		}
		if ( this.vsmShadowMapHorizontal !== null ) {
			this.vsmShadowMapHorizontal.dispose();
			this.vsmShadowMapHorizontal = null;
			this.vsmMaterialHorizontal.dispose();
			this.vsmMaterialHorizontal = null;
		}
		super.dispose();
	}
	/**
	* The implementation performs the update of the shadow map if necessary.
	*
	* @param {NodeFrame} frame - A reference to the current node frame.
	*/
	updateBefore( frame ) {
		const { shadow } = this;
		const needsUpdate = shadow.needsUpdate || shadow.autoUpdate;
		if ( needsUpdate ) {
			this.updateShadow( frame );
			// Only clear the on-demand flag once the depth texture was actually re-rendered
			// (its version is unchanged relative to the value cached in updateShadow()).
			if ( this.shadowMap.depthTexture.version === this._depthVersionCached ) {
				shadow.needsUpdate = false;
			}
		}
	}
}
/**
* TSL function for creating an instance of `ShadowNode`.
*
* @function
* @param {Light} light - The shadow casting light.
* @param {LightShadow} lightShadow - The light shadow.
* @return {ShadowNode} The created shadow node.
*/
const shadow = ( light, lightShadow ) => nodeObject( new ShadowNode( light, lightShadow ) );
/**
* Base class for analytic light nodes.
*
* @augments LightingNode
*/
class AnalyticLightNode extends LightingNode {
	static get type() {
		return 'AnalyticLightNode';
	}
	/**
	* Constructs a new analytic light node.
	*
	* @param {Light?} [light=null] - The light source.
	*/
	constructor( light = null ) {
		super();
		/**
		* The light source.
		*
		* @type {Light?}
		* @default null
		*/
		this.light = light;
		/**
		* The light's color value. Updated once per frame from the light's
		* color multiplied by its intensity (see {@link AnalyticLightNode#update}).
		*
		* @type {Color}
		*/
		this.color = new Color();
		/**
		* The light's color node. Points to `colorNode` of the light source, if set. Otherwise
		* it creates a uniform node based on {@link AnalyticLightNode#color}.
		*
		* @type {Node}
		*/
		this.colorNode = ( light && light.colorNode ) || uniform( this.color ).setGroup( renderGroup );
		/**
		* This property is used to retain a reference to the original value of {@link AnalyticLightNode#colorNode}.
		* The final color node is represented by a different node when using shadows.
		*
		* @type {Node?}
		* @default null
		*/
		this.baseColorNode = null;
		/**
		* Represents the light's shadow.
		*
		* @type {ShadowNode?}
		* @default null
		*/
		this.shadowNode = null;
		/**
		* Represents the light's shadow color.
		*
		* @type {Node?}
		* @default null
		*/
		this.shadowColorNode = null;
		/**
		* This flag can be used for type testing.
		*
		* @type {Boolean}
		* @readonly
		* @default true
		*/
		this.isAnalyticLightNode = true;
		/**
		* Overwritten since analytic light nodes are updated
		* once per frame.
		*
		* @type {String}
		* @default 'frame'
		*/
		this.updateType = NodeUpdateType.FRAME;
	}
	/**
	* Overwrites the default {@link Node#customCacheKey} implementation by including the
	* `light.id` and `light.castShadow` into the cache key.
	*
	* @return {Number} The custom cache key.
	*/
	customCacheKey() {
		return hash$1( this.light.id, this.light.castShadow ? 1 : 0 );
	}
	// The node hash is the light's UUID so one node instance exists per light.
	getHash() {
		return this.light.uuid;
	}
	/**
	* Setups the shadow node for this light. The method exists so concrete light classes
	* can setup different types of shadow nodes.
	*
	* @return {ShadowNode} The created shadow node.
	*/
	setupShadowNode() {
		return shadow( this.light );
	}
	/**
	* Setups the shadow for this light. This method is only executed if the light
	* cast shadows and the current build object receives shadows. It incorporates
	* shadows into the lighting computation.
	*
	* @param {NodeBuilder} builder - The current node builder.
	*/
	setupShadow( builder ) {
		const { renderer } = builder;
		if ( renderer.shadowMap.enabled === false ) return;
		// The shadowed color node is built once and cached; subsequent setups reuse it.
		let shadowColorNode = this.shadowColorNode;
		if ( shadowColorNode === null ) {
			const customShadowNode = this.light.shadow.shadowNode;
			let shadowNode;
			if ( customShadowNode !== undefined ) {
				shadowNode = nodeObject( customShadowNode );
			} else {
				shadowNode = this.setupShadowNode( builder );
			}
			this.shadowNode = shadowNode;
			this.shadowColorNode = shadowColorNode = this.colorNode.mul( shadowNode );
			// Keep the unshadowed color so setup() can restore it on the next build.
			this.baseColorNode = this.colorNode;
		}
		//
		this.colorNode = shadowColorNode;
	}
	/**
	* Unlike most other nodes, lighting nodes do not return a output node in {@link Node#setup}.
	* The main purpose of lighting nodes is to configure the current {@link LightingModel} and/or
	* invocate the respective interface methods.
	*
	* @param {NodeBuilder} builder - The current node builder.
	*/
	setup( builder ) {
		// Restore the unshadowed color before (re-)applying shadows for this build.
		this.colorNode = this.baseColorNode || this.colorNode;
		if ( this.light.castShadow ) {
			if ( builder.object.receiveShadow ) {
				this.setupShadow( builder );
			}
		} else if ( this.shadowNode !== null ) {
			// Light stopped casting shadows — release the cached shadow resources.
			this.shadowNode.dispose();
			this.shadowNode = null;
			this.shadowColorNode = null;
		}
	}
	/**
	* The update method is used to update light uniforms per frame.
	* Potentially overwritten in concrete light nodes to update light
	* specific uniforms.
	*
	* @param {NodeFrame} frame - A reference to the current node frame.
	*/
	update( /*frame*/ ) {
		const { light } = this;
		this.color.copy( light.color ).multiplyScalar( light.intensity );
	}
}
/** @module LightUtils **/
/**
* Computes the physically based distance attenuation of a punctual light.
*
* @method
* @param {Object} inputs - The input parameter object.
* @param {Node<float>} inputs.lightDistance - The distance of the light's position to the current fragment position.
* @param {Node<float>} inputs.cutoffDistance - The light's cutoff distance.
* @param {Node<float>} inputs.decayExponent - The light's decay exponent.
* @return {Node<float>} The distance falloff.
*/
const getDistanceAttenuation = /*@__PURE__*/ Fn( ( { lightDistance, cutoffDistance, decayExponent } ) => {
	// based upon Frostbite 3 Moving to Physically-based Rendering
	// page 32, equation 26: E[window1]
	// https://seblagarde.files.wordpress.com/2015/07/course_notes_moving_frostbite_to_pbr_v32.pdf
	const distanceFalloff = lightDistance.pow( decayExponent ).max( 0.01 ).reciprocal();
	// A cutoff distance > 0 applies an additional smooth window so the light reaches zero there.
	const windowedFalloff = distanceFalloff.mul( lightDistance.div( cutoffDistance ).pow4().oneMinus().clamp().pow2() );
	return cutoffDistance.greaterThan( 0 ).select( windowedFalloff, distanceFalloff );
} ); // validated
/** @module PointShadowNode **/
// Scratch color used to save/restore the renderer's clear color during point shadow rendering.
const _clearColor$2 = /*@__PURE__*/ new Color();
// cubeToUV() maps a 3D direction vector suitable for cube texture mapping to a 2D
// vector suitable for 2D texture mapping. This code uses the following layout for the
// 2D texture:
//
// xzXZ
// y Y
//
// Y - Positive y direction
// y - Negative y direction
// X - Positive x direction
// x - Negative x direction
// Z - Positive z direction
// z - Negative z direction
//
// Source and test bed:
// https://gist.github.com/tschw/da10c43c467ce8afd0c4
const cubeToUV = /*@__PURE__*/ Fn( ( [ pos, texelSizeY ] ) => {
	const dir = pos.toVar();
	// Number of texels to avoid at the edge of each square
	const absDir = abs( dir );
	// Intersect unit cube
	const cubeScale = div( 1.0, max$1( absDir.x, max$1( absDir.y, absDir.z ) ) );
	absDir.mulAssign( cubeScale );
	// Apply scale to avoid seams
	// two texels less per square (one texel will do for NEAREST)
	dir.mulAssign( cubeScale.mul( texelSizeY.mul( 2 ).oneMinus() ) );
	// Unwrap
	// space: -1 ... 1 range for each square
	//
	// #X## dim := ( 4 , 2 )
	// # # center := ( 1 , 1 )
	const planar = vec2( dir.xy ).toVar();
	const almostATexel = texelSizeY.mul( 1.5 );
	const almostOne = almostATexel.oneMinus();
	If( absDir.z.greaterThanEqual( almostOne ), () => {
		If( dir.z.greaterThan( 0.0 ), () => {
			planar.x.assign( sub( 4.0, dir.x ) );
		} );
	} ).ElseIf( absDir.x.greaterThanEqual( almostOne ), () => {
		const signX = sign( dir.x );
		planar.x.assign( dir.z.mul( signX ).add( signX.mul( 2.0 ) ) );
	} ).ElseIf( absDir.y.greaterThanEqual( almostOne ), () => {
		const signY = sign( dir.y );
		planar.x.assign( dir.x.add( signY.mul( 2.0 ) ).add( 2.0 ) );
		planar.y.assign( dir.z.mul( signY ).sub( 2.0 ) );
	} );
	// Transform to UV space
	// scale := 0.5 / dim
	// translate := ( center + 0.5 ) / dim
	return vec2( 0.125, 0.25 ).mul( planar ).add( vec2( 0.375, 0.75 ) ).flipY();
} ).setLayout( {
	name: 'cubeToUV',
	type: 'vec2',
	inputs: [
		{ name: 'pos', type: 'vec3' },
		{ name: 'texelSizeY', type: 'float' }
	]
} );
// Hard (unfiltered) point light shadow: a single depth comparison on the unwrapped cube face.
const BasicPointShadowFilter = /*@__PURE__*/ Fn( ( { depthTexture, bd3D, dp, texelSize } ) => {
	const uv = cubeToUV( bd3D, texelSize.y );
	return texture( depthTexture, uv ).compare( dp );
} );
// PCF point light shadow: averages nine depth comparisons offset around the base direction.
const PointShadowFilter = /*@__PURE__*/ Fn( ( { depthTexture, bd3D, dp, texelSize, shadow } ) => {
	const radius = reference( 'radius', 'float', shadow ).setGroup( renderGroup );
	const offset = vec2( - 1.0, 1.0 ).mul( radius ).mul( texelSize.y );
	// One shadow map comparison for the given sampling direction.
	const tap = ( dir ) => texture( depthTexture, cubeToUV( dir, texelSize.y ) ).compare( dp );
	return tap( bd3D.add( offset.xyy ) )
		.add( tap( bd3D.add( offset.yyy ) ) )
		.add( tap( bd3D.add( offset.xyx ) ) )
		.add( tap( bd3D.add( offset.yyx ) ) )
		.add( tap( bd3D ) )
		.add( tap( bd3D.add( offset.xxy ) ) )
		.add( tap( bd3D.add( offset.yxy ) ) )
		.add( tap( bd3D.add( offset.xxx ) ) )
		.add( tap( bd3D.add( offset.yxx ) ) )
		.mul( 1.0 / 9.0 );
} );
// Shared point light shadow evaluation: range-tests the fragment against the shadow
// camera's near/far interval and delegates the actual filtering to `filterFn`.
const pointShadowFilter = /*@__PURE__*/ Fn( ( { filterFn, depthTexture, shadowCoord, shadow } ) => {
	// for point lights, the uniform @vShadowCoord is re-purposed to hold
	// the vector from the light to the world-space position of the fragment.
	const lightToFragment = shadowCoord.xyz.toVar();
	const lightToFragmentLength = lightToFragment.length();
	// Local near/far uniforms are required since the global camera nodes do not track the shadow camera.
	const nearLocal = uniform( 'float' ).setGroup( renderGroup ).onRenderUpdate( () => shadow.camera.near );
	const farLocal = uniform( 'float' ).setGroup( renderGroup ).onRenderUpdate( () => shadow.camera.far );
	const bias = reference( 'bias', 'float', shadow ).setGroup( renderGroup );
	const shadowMapSize = uniform( shadow.mapSize ).setGroup( renderGroup );
	const shadowFactor = float( 1.0 ).toVar();
	If( lightToFragmentLength.sub( farLocal ).lessThanEqual( 0.0 ).and( lightToFragmentLength.sub( nearLocal ).greaterThanEqual( 0.0 ) ), () => {
		// dp = normalized distance from light to fragment position
		const dp = lightToFragmentLength.sub( nearLocal ).div( farLocal.sub( nearLocal ) ).toVar(); // need to clamp?
		dp.addAssign( bias );
		// bd3D = base direction 3D
		const bd3D = lightToFragment.normalize();
		// The cube faces are laid out in a 4x2 texel grid (see cubeToUV()).
		const texelSize = vec2( 1.0 ).div( shadowMapSize.mul( vec2( 4.0, 2.0 ) ) );
		// percentage-closer filtering
		shadowFactor.assign( filterFn( { depthTexture, bd3D, dp, texelSize, shadow } ) );
	} );
	return shadowFactor;
} );
// Scratch objects reused across point shadow renders to avoid per-frame allocations.
const _viewport = /*@__PURE__*/ new Vector4();
const _viewportSize = /*@__PURE__*/ new Vector2();
const _shadowMapSize = /*@__PURE__*/ new Vector2();
/**
* Represents the shadow implementation for point light nodes. Point lights
* render their shadow into a multi-viewport atlas (one viewport per cube
* direction) instead of a single shadow camera view.
*
* @augments module:ShadowNode~ShadowNode
*/
class PointShadowNode extends ShadowNode {
	static get type() {
		return 'PointShadowNode';
	}
	/**
	* Constructs a new point shadow node.
	*
	* @param {PointLight} light - The shadow casting point light.
	* @param {PointLightShadow?} [shadow=null] - An optional point light shadow.
	*/
	constructor( light, shadow = null ) {
		super( light, shadow );
	}
	/**
	* Overwrites the default implementation to return point light shadow specific
	* filtering functions.
	*
	* @param {Number} type - The shadow type.
	* @return {Function} The filtering function.
	*/
	getShadowFilterFn( type ) {
		return type === BasicShadowMap ? BasicPointShadowFilter : PointShadowFilter;
	}
	/**
	* Overwrites the default implementation so the unaltered shadow position is used.
	* For point lights the position holds the light-to-fragment vector (see pointShadowFilter).
	*
	* @param {NodeBuilder} builder - A reference to the current node builder.
	* @param {Node<vec3>} shadowPosition - A node representing the shadow position.
	* @return {Node<vec3>} The shadow coordinates.
	*/
	setupShadowCoord( builder, shadowPosition ) {
		return shadowPosition;
	}
	/**
	* Overwrites the default implementation to only use point light specific
	* shadow filter functions.
	*
	* @param {NodeBuilder} builder - A reference to the current node builder.
	* @param {Object} inputs - A configuration object that defines the shadow filtering.
	* @param {Function} inputs.filterFn - This function defines the filtering type of the shadow map e.g. PCF.
	* @param {Texture} inputs.shadowTexture - A reference to the shadow map's texture.
	* @param {DepthTexture} inputs.depthTexture - A reference to the shadow map's texture data.
	* @param {Node<vec3>} inputs.shadowCoord - Shadow coordinates which are used to sample from the shadow map.
	* @param {LightShadow} inputs.shadow - The light shadow.
	* @return {Node<float>} The result node of the shadow filtering.
	*/
	setupShadowFilter( builder, { filterFn, shadowTexture, depthTexture, shadowCoord, shadow } ) {
		return pointShadowFilter( { filterFn, shadowTexture, depthTexture, shadowCoord, shadow } );
	}
	/**
	* Overwrites the default implementation with point light specific
	* rendering code: one render per cube direction into its own viewport
	* of the shared shadow map atlas.
	*
	* @param {NodeFrame} frame - A reference to the current node frame.
	*/
	renderShadow( frame ) {
		const { shadow, shadowMap, light } = this;
		const { renderer, scene } = frame;
		// The atlas size is the per-face map size scaled by the frame extents (4x2 grid).
		const shadowFrameExtents = shadow.getFrameExtents();
		_shadowMapSize.copy( shadow.mapSize );
		_shadowMapSize.multiply( shadowFrameExtents );
		shadowMap.setSize( _shadowMapSize.width, _shadowMapSize.height );
		_viewportSize.copy( shadow.mapSize );
		//
		// Save clear state, clear the whole atlas once, then render each face viewport.
		const previousAutoClear = renderer.autoClear;
		const previousClearColor = renderer.getClearColor( _clearColor$2 );
		const previousClearAlpha = renderer.getClearAlpha();
		renderer.autoClear = false;
		renderer.setClearColor( shadow.clearColor, shadow.clearAlpha );
		renderer.clear();
		const viewportCount = shadow.getViewportCount();
		for ( let vp = 0; vp < viewportCount; vp ++ ) {
			const viewport = shadow.getViewport( vp );
			const x = _viewportSize.x * viewport.x;
			// Flip y so the viewport origin matches the texture's coordinate convention.
			const y = _shadowMapSize.y - _viewportSize.y - ( _viewportSize.y * viewport.y );
			_viewport.set(
				x,
				y,
				_viewportSize.x * viewport.z,
				_viewportSize.y * viewport.w
			);
			shadowMap.viewport.copy( _viewport );
			shadow.updateMatrices( light, vp );
			renderer.render( scene, shadow.camera );
		}
		//
		renderer.autoClear = previousAutoClear;
		renderer.setClearColor( previousClearColor, previousClearAlpha );
	}
}
/**
* TSL function for creating an instance of `PointShadowNode`.
*
* @function
* @param {PointLight} light - The shadow casting point light.
* @param {PointLightShadow?} [lightShadow=null] - An optional point light shadow.
* @return {PointShadowNode} The created point shadow node.
*/
const pointShadow = ( light, lightShadow ) => nodeObject( new PointShadowNode( light, lightShadow ) );
// Feeds a point light's direct contribution (direction + attenuated color) into
// the active lighting model.
const directPointLight = Fn( ( { color, lightViewPosition, cutoffDistance, decayExponent }, builder ) => {
	const { lightingModel, reflectedLight } = builder.context;
	const toLight = lightViewPosition.sub( positionView ); // @TODO: Add it into LightNode
	const lightDirection = toLight.normalize();
	const lightAttenuation = getDistanceAttenuation( {
		lightDistance: toLight.length(),
		cutoffDistance,
		decayExponent
	} );
	lightingModel.direct( {
		lightDirection,
		lightColor: color.mul( lightAttenuation ),
		reflectedLight
	}, builder.stack, builder );
} );
/**
* Module for representing point lights as nodes.
*
* @augments AnalyticLightNode
*/
class PointLightNode extends AnalyticLightNode {
	static get type() {
		return 'PointLightNode';
	}
	/**
	* Constructs a new point light node.
	*
	* @param {PointLight?} [light=null] - The point light source.
	*/
	constructor( light = null ) {
		super( light );
		/**
		* Uniform node representing the cutoff distance.
		*
		* @type {UniformNode<float>}
		*/
		this.cutoffDistanceNode = uniform( 0 ).setGroup( renderGroup );
		/**
		* Uniform node representing the decay exponent.
		*
		* @type {UniformNode<float>}
		*/
		this.decayExponentNode = uniform( 2 ).setGroup( renderGroup );
	}
	/**
	* Overwritten to updated point light specific uniforms.
	*
	* @param {NodeFrame} frame - A reference to the current node frame.
	*/
	update( frame ) {
		super.update( frame );
		const { light, cutoffDistanceNode, decayExponentNode } = this;
		cutoffDistanceNode.value = light.distance;
		decayExponentNode.value = light.decay;
	}
	/**
	* Overwritten to setup point light specific shadow.
	*
	* @return {PointShadowNode}
	*/
	setupShadowNode() {
		return pointShadow( this.light );
	}
	setup( builder ) {
		super.setup( builder );
		directPointLight( {
			color: this.colorNode,
			lightViewPosition: lightViewPosition( this.light ),
			cutoffDistance: this.cutoffDistanceNode,
			decayExponent: this.decayExponentNode
		} ).append();
	}
}
/** @module Procedural **/
/**
* Creates a 2x2 checkerboard pattern that can be used as procedural texture data.
*
* @method
* @param {Node<vec2>} coord - The uv coordinates.
* @return {Node<float>} The result data.
*/
const checker = /*@__PURE__*/ Fn( ( [ coord = uv() ] ) => {
	// Scale so each checker cell covers half of the [0,1] uv range.
	const scaledUv = coord.mul( 2.0 );
	const cellX = scaledUv.x.floor();
	const cellY = scaledUv.y.floor();
	// Alternate cells via the parity of the summed cell indices.
	return cellX.add( cellY ).mod( 2.0 ).sign();
} );
/** @module Shapes **/
/**
* Generates a circle based on the uv coordinates.
*
* @method
* @param {Node<vec2>} coord - The uv to generate the circle.
* @return {Node<float>} The circle shape.
*/
const shapeCircle = Fn( ( [ coord = uv() ], { renderer, material } ) => {
	const alpha = float( 1 ).toVar();
	// Squared distance from the circle center in [-1,1] space.
	const distSq = lengthSq( coord.mul( 2 ).sub( 1 ) );
	if ( material.alphaToCoverage && renderer.samples > 1 ) {
		// MSAA alpha-to-coverage: fade the edge over one derivative-sized band instead of discarding.
		const delta = float( distSq.fwidth() ).toVar();
		alpha.assign( smoothstep( delta.oneMinus(), delta.add( 1 ), distSq ).oneMinus() );
	} else {
		// Hard cutout: discard fragments outside the unit circle.
		distSq.greaterThan( 1.0 ).discard();
	}
	return alpha;
} );
// Three.js Transpiler
// https://raw.githubusercontent.com/AcademySoftwareFoundation/MaterialX/main/libraries/stdlib/genglsl/lib/mx_noise.glsl
// Returns t when b is true, otherwise f.
const mx_select = /*@__PURE__*/ Fn( ( [ b_immutable, t_immutable, f_immutable ] ) => {
	const b = bool( b_immutable ).toVar();
	const t = float( t_immutable ).toVar();
	const f = float( f_immutable ).toVar();
	return select( b, t, f );
} ).setLayout( {
	name: 'mx_select',
	type: 'float',
	inputs: [
		{ name: 'b', type: 'bool' },
		{ name: 't', type: 'float' },
		{ name: 'f', type: 'float' }
	]
} );
// Returns -val when b is true, otherwise val unchanged.
const mx_negate_if = /*@__PURE__*/ Fn( ( [ val_immutable, b_immutable ] ) => {
	const val = float( val_immutable ).toVar();
	const b = bool( b_immutable ).toVar();
	return select( b, val.negate(), val );
} ).setLayout( {
	name: 'mx_negate_if',
	type: 'float',
	inputs: [
		{ name: 'val', type: 'float' },
		{ name: 'b', type: 'bool' }
	]
} );
// Floor toward negative infinity, cast to int.
const mx_floor = /*@__PURE__*/ Fn( ( [ x_immutable ] ) => {
	return int( floor( float( x_immutable ).toVar() ) );
} ).setLayout( {
	name: 'mx_floor',
	type: 'int',
	inputs: [
		{ name: 'x', type: 'float' }
	]
} );
// Writes floor( x ) into the caller-provided node `i` (out parameter) and
// returns the fractional remainder x - floor( x ).
const mx_floorfrac = /*@__PURE__*/ Fn( ( [ x_immutable, i ] ) => {
	const x = float( x_immutable ).toVar();
	i.assign( mx_floor( x ) );
	return x.sub( float( i ) );
} );
// Bilinear interpolation of four scalar corner values with weights (s, t).
const mx_bilerp_0 = /*@__PURE__*/ Fn( ( [ v0_immutable, v1_immutable, v2_immutable, v3_immutable, s_immutable, t_immutable ] ) => {
	const v0 = float( v0_immutable ).toVar();
	const v1 = float( v1_immutable ).toVar();
	const v2 = float( v2_immutable ).toVar();
	const v3 = float( v3_immutable ).toVar();
	const s = float( s_immutable ).toVar();
	const t = float( t_immutable ).toVar();
	const s1 = float( sub( 1.0, s ) ).toVar();
	// Lerp along s on both rows, then along t between the rows.
	const bottom = v0.mul( s1 ).add( v1.mul( s ) );
	const top = v2.mul( s1 ).add( v3.mul( s ) );
	return sub( 1.0, t ).mul( bottom ).add( t.mul( top ) );
} ).setLayout( {
	name: 'mx_bilerp_0',
	type: 'float',
	inputs: [
		{ name: 'v0', type: 'float' },
		{ name: 'v1', type: 'float' },
		{ name: 'v2', type: 'float' },
		{ name: 'v3', type: 'float' },
		{ name: 's', type: 'float' },
		{ name: 't', type: 'float' }
	]
} );
const mx_bilerp_1 = /*@__PURE__*/ Fn( ( [ v0_immutable, v1_immutable, v2_immutable, v3_immutable, s_immutable, t_immutable ] ) => {
const t = float( t_immutable ).toVar();
const s = float( s_immutable ).toVar();
const v3 = vec3( v3_immutable ).toVar();
const v2 = vec3( v2_immutable ).toVar();
const v1 = vec3( v1_immutable ).toVar();
const v0 = vec3( v0_immutable ).toVar();
const s1 = float( sub( 1.0, s ) ).toVar();
return sub( 1.0, t ).mul( v0.mul( s1 ).add( v1.mul( s ) ) ).add( t.mul( v2.mul( s1 ).add( v3.mul( s ) ) ) );
} ).setLayout( {
name: 'mx_bilerp_1',
type: 'vec3',
inputs: [
{ name: 'v0', type: 'vec3' },
{ name: 'v1', type: 'vec3' },
{ name: 'v2', type: 'vec3' },
{ name: 'v3', type: 'vec3' },
{ name: 's', type: 'float' },
{ name: 't', type: 'float' }
]
} );
const mx_bilerp = /*@__PURE__*/ overloadingFn( [ mx_bilerp_0, mx_bilerp_1 ] );
// Trilinear interpolation of eight cube-corner values at ( s, t, r ):
// v0..v3 span the r = 0 face (same layout as mx_bilerp), v4..v7 the r = 1 face.
// Float variant.
const mx_trilerp_0 = /*@__PURE__*/ Fn( ( [ v0_immutable, v1_immutable, v2_immutable, v3_immutable, v4_immutable, v5_immutable, v6_immutable, v7_immutable, s_immutable, t_immutable, r_immutable ] ) => {
const r = float( r_immutable ).toVar();
const t = float( t_immutable ).toVar();
const s = float( s_immutable ).toVar();
const v7 = float( v7_immutable ).toVar();
const v6 = float( v6_immutable ).toVar();
const v5 = float( v5_immutable ).toVar();
const v4 = float( v4_immutable ).toVar();
const v3 = float( v3_immutable ).toVar();
const v2 = float( v2_immutable ).toVar();
const v1 = float( v1_immutable ).toVar();
const v0 = float( v0_immutable ).toVar();
const s1 = float( sub( 1.0, s ) ).toVar();
const t1 = float( sub( 1.0, t ) ).toVar();
const r1 = float( sub( 1.0, r ) ).toVar();
return r1.mul( t1.mul( v0.mul( s1 ).add( v1.mul( s ) ) ).add( t.mul( v2.mul( s1 ).add( v3.mul( s ) ) ) ) ).add( r.mul( t1.mul( v4.mul( s1 ).add( v5.mul( s ) ) ).add( t.mul( v6.mul( s1 ).add( v7.mul( s ) ) ) ) ) );
} ).setLayout( {
name: 'mx_trilerp_0',
type: 'float',
inputs: [
{ name: 'v0', type: 'float' },
{ name: 'v1', type: 'float' },
{ name: 'v2', type: 'float' },
{ name: 'v3', type: 'float' },
{ name: 'v4', type: 'float' },
{ name: 'v5', type: 'float' },
{ name: 'v6', type: 'float' },
{ name: 'v7', type: 'float' },
{ name: 's', type: 'float' },
{ name: 't', type: 'float' },
{ name: 'r', type: 'float' }
]
} );
// vec3 variant of mx_trilerp_0: each component interpolated independently.
const mx_trilerp_1 = /*@__PURE__*/ Fn( ( [ v0_immutable, v1_immutable, v2_immutable, v3_immutable, v4_immutable, v5_immutable, v6_immutable, v7_immutable, s_immutable, t_immutable, r_immutable ] ) => {
const r = float( r_immutable ).toVar();
const t = float( t_immutable ).toVar();
const s = float( s_immutable ).toVar();
const v7 = vec3( v7_immutable ).toVar();
const v6 = vec3( v6_immutable ).toVar();
const v5 = vec3( v5_immutable ).toVar();
const v4 = vec3( v4_immutable ).toVar();
const v3 = vec3( v3_immutable ).toVar();
const v2 = vec3( v2_immutable ).toVar();
const v1 = vec3( v1_immutable ).toVar();
const v0 = vec3( v0_immutable ).toVar();
const s1 = float( sub( 1.0, s ) ).toVar();
const t1 = float( sub( 1.0, t ) ).toVar();
const r1 = float( sub( 1.0, r ) ).toVar();
return r1.mul( t1.mul( v0.mul( s1 ).add( v1.mul( s ) ) ).add( t.mul( v2.mul( s1 ).add( v3.mul( s ) ) ) ) ).add( r.mul( t1.mul( v4.mul( s1 ).add( v5.mul( s ) ) ).add( t.mul( v6.mul( s1 ).add( v7.mul( s ) ) ) ) ) );
} ).setLayout( {
name: 'mx_trilerp_1',
type: 'vec3',
inputs: [
{ name: 'v0', type: 'vec3' },
{ name: 'v1', type: 'vec3' },
{ name: 'v2', type: 'vec3' },
{ name: 'v3', type: 'vec3' },
{ name: 'v4', type: 'vec3' },
{ name: 'v5', type: 'vec3' },
{ name: 'v6', type: 'vec3' },
{ name: 'v7', type: 'vec3' },
{ name: 's', type: 'float' },
{ name: 't', type: 'float' },
{ name: 'r', type: 'float' }
]
} );
// Overload resolved by argument type: float or vec3 corner values.
const mx_trilerp = /*@__PURE__*/ overloadingFn( [ mx_trilerp_0, mx_trilerp_1 ] );
// 2-D Perlin gradient evaluation: the low 3 bits of `hash` pick components of the
// corner-to-sample offset ( x, y ) and their signs, yielding the dot product of a
// pseudo-random lattice gradient with the offset.
const mx_gradient_float_0 = /*@__PURE__*/ Fn( ( [ hash_immutable, x_immutable, y_immutable ] ) => {
const y = float( y_immutable ).toVar();
const x = float( x_immutable ).toVar();
const hash = uint( hash_immutable ).toVar();
const h = uint( hash.bitAnd( uint( 7 ) ) ).toVar();
const u = float( mx_select( h.lessThan( uint( 4 ) ), x, y ) ).toVar();
const v = float( mul( 2.0, mx_select( h.lessThan( uint( 4 ) ), y, x ) ) ).toVar();
// bit 0 flips the sign of u, bit 1 flips the sign of v
return mx_negate_if( u, bool( h.bitAnd( uint( 1 ) ) ) ).add( mx_negate_if( v, bool( h.bitAnd( uint( 2 ) ) ) ) );
} ).setLayout( {
name: 'mx_gradient_float_0',
type: 'float',
inputs: [
{ name: 'hash', type: 'uint' },
{ name: 'x', type: 'float' },
{ name: 'y', type: 'float' }
]
} );
// 3-D Perlin gradient evaluation: the low 4 bits of `hash` select one of the
// canonical gradient directions over the offset ( x, y, z ).
const mx_gradient_float_1 = /*@__PURE__*/ Fn( ( [ hash_immutable, x_immutable, y_immutable, z_immutable ] ) => {
const z = float( z_immutable ).toVar();
const y = float( y_immutable ).toVar();
const x = float( x_immutable ).toVar();
const hash = uint( hash_immutable ).toVar();
const h = uint( hash.bitAnd( uint( 15 ) ) ).toVar();
const u = float( mx_select( h.lessThan( uint( 8 ) ), x, y ) ).toVar();
const v = float( mx_select( h.lessThan( uint( 4 ) ), y, mx_select( h.equal( uint( 12 ) ).or( h.equal( uint( 14 ) ) ), x, z ) ) ).toVar();
// bit 0 flips the sign of u, bit 1 flips the sign of v
return mx_negate_if( u, bool( h.bitAnd( uint( 1 ) ) ) ).add( mx_negate_if( v, bool( h.bitAnd( uint( 2 ) ) ) ) );
} ).setLayout( {
name: 'mx_gradient_float_1',
type: 'float',
inputs: [
{ name: 'hash', type: 'uint' },
{ name: 'x', type: 'float' },
{ name: 'y', type: 'float' },
{ name: 'z', type: 'float' }
]
} );
// Overload resolved by arity: 2-D ( hash, x, y ) or 3-D ( hash, x, y, z ).
const mx_gradient_float = /*@__PURE__*/ overloadingFn( [ mx_gradient_float_0, mx_gradient_float_1 ] );
// Vector-valued 2-D gradient: evaluates mx_gradient_float once per lane of the
// uvec3 hash, producing three decorrelated noise channels.
const mx_gradient_vec3_0 = /*@__PURE__*/ Fn( ( [ hash_immutable, x_immutable, y_immutable ] ) => {
const y = float( y_immutable ).toVar();
const x = float( x_immutable ).toVar();
const hash = uvec3( hash_immutable ).toVar();
return vec3( mx_gradient_float( hash.x, x, y ), mx_gradient_float( hash.y, x, y ), mx_gradient_float( hash.z, x, y ) );
} ).setLayout( {
name: 'mx_gradient_vec3_0',
type: 'vec3',
inputs: [
{ name: 'hash', type: 'uvec3' },
{ name: 'x', type: 'float' },
{ name: 'y', type: 'float' }
]
} );
// Vector-valued 3-D gradient: same per-lane construction as above with a z offset.
const mx_gradient_vec3_1 = /*@__PURE__*/ Fn( ( [ hash_immutable, x_immutable, y_immutable, z_immutable ] ) => {
const z = float( z_immutable ).toVar();
const y = float( y_immutable ).toVar();
const x = float( x_immutable ).toVar();
const hash = uvec3( hash_immutable ).toVar();
return vec3( mx_gradient_float( hash.x, x, y, z ), mx_gradient_float( hash.y, x, y, z ), mx_gradient_float( hash.z, x, y, z ) );
} ).setLayout( {
name: 'mx_gradient_vec3_1',
type: 'vec3',
inputs: [
{ name: 'hash', type: 'uvec3' },
{ name: 'x', type: 'float' },
{ name: 'y', type: 'float' },
{ name: 'z', type: 'float' }
]
} );
// Overload resolved by arity: 2-D or 3-D offset.
const mx_gradient_vec3 = /*@__PURE__*/ overloadingFn( [ mx_gradient_vec3_0, mx_gradient_vec3_1 ] );
// Post-scale for raw 2-D Perlin output; the constant 0.6616 is copied from the
// MaterialX reference implementation (see URL above) and brings the result into
// an approximately [-1, 1] range. Float variant.
const mx_gradient_scale2d_0 = /*@__PURE__*/ Fn( ( [ v_immutable ] ) => {
const v = float( v_immutable ).toVar();
return mul( 0.6616, v );
} ).setLayout( {
name: 'mx_gradient_scale2d_0',
type: 'float',
inputs: [
{ name: 'v', type: 'float' }
]
} );
// Post-scale for raw 3-D Perlin output (constant 0.9820 from the MaterialX
// reference). Float variant.
const mx_gradient_scale3d_0 = /*@__PURE__*/ Fn( ( [ v_immutable ] ) => {
const v = float( v_immutable ).toVar();
return mul( 0.9820, v );
} ).setLayout( {
name: 'mx_gradient_scale3d_0',
type: 'float',
inputs: [
{ name: 'v', type: 'float' }
]
} );
// vec3 variant of mx_gradient_scale2d_0.
const mx_gradient_scale2d_1 = /*@__PURE__*/ Fn( ( [ v_immutable ] ) => {
const v = vec3( v_immutable ).toVar();
return mul( 0.6616, v );
} ).setLayout( {
name: 'mx_gradient_scale2d_1',
type: 'vec3',
inputs: [
{ name: 'v', type: 'vec3' }
]
} );
// Overload resolved by argument type: float or vec3.
const mx_gradient_scale2d = /*@__PURE__*/ overloadingFn( [ mx_gradient_scale2d_0, mx_gradient_scale2d_1 ] );
// vec3 variant of mx_gradient_scale3d_0.
const mx_gradient_scale3d_1 = /*@__PURE__*/ Fn( ( [ v_immutable ] ) => {
const v = vec3( v_immutable ).toVar();
return mul( 0.9820, v );
} ).setLayout( {
name: 'mx_gradient_scale3d_1',
type: 'vec3',
inputs: [
{ name: 'v', type: 'vec3' }
]
} );
// Overload resolved by argument type: float or vec3.
const mx_gradient_scale3d = /*@__PURE__*/ overloadingFn( [ mx_gradient_scale3d_0, mx_gradient_scale3d_1 ] );
// 32-bit rotate left: ( x << k ) | ( x >> ( 32 - k ) ).
const mx_rotl32 = /*@__PURE__*/ Fn( ( [ x_immutable, k_immutable ] ) => {
const amount = int( k_immutable ).toVar();
const value = uint( x_immutable ).toVar();
const high = value.shiftLeft( amount );
const low = value.shiftRight( int( 32 ).sub( amount ) );
return high.bitOr( low );
} ).setLayout( {
name: 'mx_rotl32',
type: 'uint',
inputs: [
{ name: 'x', type: 'uint' },
{ name: 'k', type: 'int' }
]
} );
// In-place mixing round of a Jenkins "lookup3"-style hash (as ported by MaterialX):
// scrambles the three accumulators a, b, c together. The arguments are mutated,
// which is why this function has no setLayout.
const mx_bjmix = /*@__PURE__*/ Fn( ( [ a, b, c ] ) => {
a.subAssign( c );
a.bitXorAssign( mx_rotl32( c, int( 4 ) ) );
c.addAssign( b );
b.subAssign( a );
b.bitXorAssign( mx_rotl32( a, int( 6 ) ) );
a.addAssign( c );
c.subAssign( b );
c.bitXorAssign( mx_rotl32( b, int( 8 ) ) );
b.addAssign( a );
a.subAssign( c );
a.bitXorAssign( mx_rotl32( c, int( 16 ) ) );
c.addAssign( b );
b.subAssign( a );
b.bitXorAssign( mx_rotl32( a, int( 19 ) ) );
a.addAssign( c );
c.subAssign( b );
c.bitXorAssign( mx_rotl32( b, int( 4 ) ) );
b.addAssign( a );
} );
// Final avalanche round of the same hash; works on local copies of a, b, c and
// returns the finished hash value c.
const mx_bjfinal = /*@__PURE__*/ Fn( ( [ a_immutable, b_immutable, c_immutable ] ) => {
const c = uint( c_immutable ).toVar();
const b = uint( b_immutable ).toVar();
const a = uint( a_immutable ).toVar();
c.bitXorAssign( b );
c.subAssign( mx_rotl32( b, int( 14 ) ) );
a.bitXorAssign( c );
a.subAssign( mx_rotl32( c, int( 11 ) ) );
b.bitXorAssign( a );
b.subAssign( mx_rotl32( a, int( 25 ) ) );
c.bitXorAssign( b );
c.subAssign( mx_rotl32( b, int( 16 ) ) );
a.bitXorAssign( c );
a.subAssign( mx_rotl32( c, int( 4 ) ) );
b.bitXorAssign( a );
b.subAssign( mx_rotl32( a, int( 14 ) ) );
c.bitXorAssign( b );
c.subAssign( mx_rotl32( b, int( 24 ) ) );
return c;
} ).setLayout( {
name: 'mx_bjfinal',
type: 'uint',
inputs: [
{ name: 'a', type: 'uint' },
{ name: 'b', type: 'uint' },
{ name: 'c', type: 'uint' }
]
} );
// Maps a full 32-bit hash to a float in [ 0, 1 ] by dividing by 0xffffffff.
const mx_bits_to_01 = /*@__PURE__*/ Fn( ( [ bits_immutable ] ) => {
const hashBits = uint( bits_immutable ).toVar();
const maxUint = float( uint( int( 0xffffffff ) ) );
return float( hashBits ).div( maxUint );
} ).setLayout( {
name: 'mx_bits_to_01',
type: 'float',
inputs: [
{ name: 'bits', type: 'uint' }
]
} );
// Perlin's quintic fade curve 6t^5 - 15t^4 + 10t^3: has zero first and second
// derivatives at t = 0 and t = 1, giving smooth interpolation across lattice cells.
const mx_fade = /*@__PURE__*/ Fn( ( [ t_immutable ] ) => {
const t = float( t_immutable ).toVar();
const quintic = t.mul( t.mul( 6.0 ).sub( 15.0 ) ).add( 10.0 );
return t.mul( t ).mul( t ).mul( quintic );
} ).setLayout( {
name: 'mx_fade',
type: 'float',
inputs: [
{ name: 't', type: 'float' }
]
} );
// Hash of one int to a uint using the lookup3-style seed
// 0xdeadbeef + ( len << 2 ) + 13, where len is the number of inputs.
const mx_hash_int_0 = /*@__PURE__*/ Fn( ( [ x_immutable ] ) => {
const x = int( x_immutable ).toVar();
const len = uint( uint( 1 ) ).toVar();
const seed = uint( uint( int( 0xdeadbeef ) ).add( len.shiftLeft( uint( 2 ) ) ).add( uint( 13 ) ) ).toVar();
return mx_bjfinal( seed.add( uint( x ) ), seed, seed );
} ).setLayout( {
name: 'mx_hash_int_0',
type: 'uint',
inputs: [
{ name: 'x', type: 'int' }
]
} );
// Hash of two ints to a uint (same construction, len = 2).
const mx_hash_int_1 = /*@__PURE__*/ Fn( ( [ x_immutable, y_immutable ] ) => {
const y = int( y_immutable ).toVar();
const x = int( x_immutable ).toVar();
const len = uint( uint( 2 ) ).toVar();
const a = uint().toVar(), b = uint().toVar(), c = uint().toVar();
a.assign( b.assign( c.assign( uint( int( 0xdeadbeef ) ).add( len.shiftLeft( uint( 2 ) ) ).add( uint( 13 ) ) ) ) );
a.addAssign( uint( x ) );
b.addAssign( uint( y ) );
return mx_bjfinal( a, b, c );
} ).setLayout( {
name: 'mx_hash_int_1',
type: 'uint',
inputs: [
{ name: 'x', type: 'int' },
{ name: 'y', type: 'int' }
]
} );
// Hash of three ints to a uint (len = 3).
const mx_hash_int_2 = /*@__PURE__*/ Fn( ( [ x_immutable, y_immutable, z_immutable ] ) => {
const z = int( z_immutable ).toVar();
const y = int( y_immutable ).toVar();
const x = int( x_immutable ).toVar();
const len = uint( uint( 3 ) ).toVar();
const a = uint().toVar(), b = uint().toVar(), c = uint().toVar();
a.assign( b.assign( c.assign( uint( int( 0xdeadbeef ) ).add( len.shiftLeft( uint( 2 ) ) ).add( uint( 13 ) ) ) ) );
a.addAssign( uint( x ) );
b.addAssign( uint( y ) );
c.addAssign( uint( z ) );
return mx_bjfinal( a, b, c );
} ).setLayout( {
name: 'mx_hash_int_2',
type: 'uint',
inputs: [
{ name: 'x', type: 'int' },
{ name: 'y', type: 'int' },
{ name: 'z', type: 'int' }
]
} );
// Hash of four ints: the first three fill a/b/c, then mx_bjmix makes room for
// the fourth before the final round (len = 4).
const mx_hash_int_3 = /*@__PURE__*/ Fn( ( [ x_immutable, y_immutable, z_immutable, xx_immutable ] ) => {
const xx = int( xx_immutable ).toVar();
const z = int( z_immutable ).toVar();
const y = int( y_immutable ).toVar();
const x = int( x_immutable ).toVar();
const len = uint( uint( 4 ) ).toVar();
const a = uint().toVar(), b = uint().toVar(), c = uint().toVar();
a.assign( b.assign( c.assign( uint( int( 0xdeadbeef ) ).add( len.shiftLeft( uint( 2 ) ) ).add( uint( 13 ) ) ) ) );
a.addAssign( uint( x ) );
b.addAssign( uint( y ) );
c.addAssign( uint( z ) );
mx_bjmix( a, b, c );
a.addAssign( uint( xx ) );
return mx_bjfinal( a, b, c );
} ).setLayout( {
name: 'mx_hash_int_3',
type: 'uint',
inputs: [
{ name: 'x', type: 'int' },
{ name: 'y', type: 'int' },
{ name: 'z', type: 'int' },
{ name: 'xx', type: 'int' }
]
} );
// Hash of five ints (len = 5): first three mixed, last two folded in before the
// final round.
const mx_hash_int_4 = /*@__PURE__*/ Fn( ( [ x_immutable, y_immutable, z_immutable, xx_immutable, yy_immutable ] ) => {
const yy = int( yy_immutable ).toVar();
const xx = int( xx_immutable ).toVar();
const z = int( z_immutable ).toVar();
const y = int( y_immutable ).toVar();
const x = int( x_immutable ).toVar();
const len = uint( uint( 5 ) ).toVar();
const a = uint().toVar(), b = uint().toVar(), c = uint().toVar();
a.assign( b.assign( c.assign( uint( int( 0xdeadbeef ) ).add( len.shiftLeft( uint( 2 ) ) ).add( uint( 13 ) ) ) ) );
a.addAssign( uint( x ) );
b.addAssign( uint( y ) );
c.addAssign( uint( z ) );
mx_bjmix( a, b, c );
a.addAssign( uint( xx ) );
b.addAssign( uint( yy ) );
return mx_bjfinal( a, b, c );
} ).setLayout( {
name: 'mx_hash_int_4',
type: 'uint',
inputs: [
{ name: 'x', type: 'int' },
{ name: 'y', type: 'int' },
{ name: 'z', type: 'int' },
{ name: 'xx', type: 'int' },
{ name: 'yy', type: 'int' }
]
} );
// Overload resolved by arity: 1 to 5 int inputs.
const mx_hash_int = /*@__PURE__*/ overloadingFn( [ mx_hash_int_0, mx_hash_int_1, mx_hash_int_2, mx_hash_int_3, mx_hash_int_4 ] );
// Expands one 2-D lattice hash into three 8-bit hashes (bytes 0, 1, 2 of
// mx_hash_int), used as independent gradient indices for vec3-valued noise.
const mx_hash_vec3_0 = /*@__PURE__*/ Fn( ( [ x_immutable, y_immutable ] ) => {
const y = int( y_immutable ).toVar();
const x = int( x_immutable ).toVar();
const h = uint( mx_hash_int( x, y ) ).toVar();
const result = uvec3().toVar();
result.x.assign( h.bitAnd( int( 0xFF ) ) );
result.y.assign( h.shiftRight( int( 8 ) ).bitAnd( int( 0xFF ) ) );
result.z.assign( h.shiftRight( int( 16 ) ).bitAnd( int( 0xFF ) ) );
return result;
} ).setLayout( {
name: 'mx_hash_vec3_0',
type: 'uvec3',
inputs: [
{ name: 'x', type: 'int' },
{ name: 'y', type: 'int' }
]
} );
// Same byte-extraction for a 3-D lattice hash.
const mx_hash_vec3_1 = /*@__PURE__*/ Fn( ( [ x_immutable, y_immutable, z_immutable ] ) => {
const z = int( z_immutable ).toVar();
const y = int( y_immutable ).toVar();
const x = int( x_immutable ).toVar();
const h = uint( mx_hash_int( x, y, z ) ).toVar();
const result = uvec3().toVar();
result.x.assign( h.bitAnd( int( 0xFF ) ) );
result.y.assign( h.shiftRight( int( 8 ) ).bitAnd( int( 0xFF ) ) );
result.z.assign( h.shiftRight( int( 16 ) ).bitAnd( int( 0xFF ) ) );
return result;
} ).setLayout( {
name: 'mx_hash_vec3_1',
type: 'uvec3',
inputs: [
{ name: 'x', type: 'int' },
{ name: 'y', type: 'int' },
{ name: 'z', type: 'int' }
]
} );
// Overload resolved by arity: 2-D or 3-D lattice coordinates.
const mx_hash_vec3 = /*@__PURE__*/ overloadingFn( [ mx_hash_vec3_0, mx_hash_vec3_1 ] );
// Classic 2-D Perlin noise: split p into cell ( X, Y ) and fraction ( fx, fy ),
// evaluate hashed gradients at the four cell corners, blend with the quintic
// fade weights via mx_bilerp, then scale toward [-1, 1].
const mx_perlin_noise_float_0 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec2( p_immutable ).toVar();
const X = int().toVar(), Y = int().toVar();
const fx = float( mx_floorfrac( p.x, X ) ).toVar();
const fy = float( mx_floorfrac( p.y, Y ) ).toVar();
const u = float( mx_fade( fx ) ).toVar();
const v = float( mx_fade( fy ) ).toVar();
const result = float( mx_bilerp( mx_gradient_float( mx_hash_int( X, Y ), fx, fy ), mx_gradient_float( mx_hash_int( X.add( int( 1 ) ), Y ), fx.sub( 1.0 ), fy ), mx_gradient_float( mx_hash_int( X, Y.add( int( 1 ) ) ), fx, fy.sub( 1.0 ) ), mx_gradient_float( mx_hash_int( X.add( int( 1 ) ), Y.add( int( 1 ) ) ), fx.sub( 1.0 ), fy.sub( 1.0 ) ), u, v ) ).toVar();
return mx_gradient_scale2d( result );
} ).setLayout( {
name: 'mx_perlin_noise_float_0',
type: 'float',
inputs: [
{ name: 'p', type: 'vec2' }
]
} );
// Classic 3-D Perlin noise: same scheme over the eight corners of the cell cube,
// blended with mx_trilerp.
const mx_perlin_noise_float_1 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec3( p_immutable ).toVar();
const X = int().toVar(), Y = int().toVar(), Z = int().toVar();
const fx = float( mx_floorfrac( p.x, X ) ).toVar();
const fy = float( mx_floorfrac( p.y, Y ) ).toVar();
const fz = float( mx_floorfrac( p.z, Z ) ).toVar();
const u = float( mx_fade( fx ) ).toVar();
const v = float( mx_fade( fy ) ).toVar();
const w = float( mx_fade( fz ) ).toVar();
const result = float( mx_trilerp( mx_gradient_float( mx_hash_int( X, Y, Z ), fx, fy, fz ), mx_gradient_float( mx_hash_int( X.add( int( 1 ) ), Y, Z ), fx.sub( 1.0 ), fy, fz ), mx_gradient_float( mx_hash_int( X, Y.add( int( 1 ) ), Z ), fx, fy.sub( 1.0 ), fz ), mx_gradient_float( mx_hash_int( X.add( int( 1 ) ), Y.add( int( 1 ) ), Z ), fx.sub( 1.0 ), fy.sub( 1.0 ), fz ), mx_gradient_float( mx_hash_int( X, Y, Z.add( int( 1 ) ) ), fx, fy, fz.sub( 1.0 ) ), mx_gradient_float( mx_hash_int( X.add( int( 1 ) ), Y, Z.add( int( 1 ) ) ), fx.sub( 1.0 ), fy, fz.sub( 1.0 ) ), mx_gradient_float( mx_hash_int( X, Y.add( int( 1 ) ), Z.add( int( 1 ) ) ), fx, fy.sub( 1.0 ), fz.sub( 1.0 ) ), mx_gradient_float( mx_hash_int( X.add( int( 1 ) ), Y.add( int( 1 ) ), Z.add( int( 1 ) ) ), fx.sub( 1.0 ), fy.sub( 1.0 ), fz.sub( 1.0 ) ), u, v, w ) ).toVar();
return mx_gradient_scale3d( result );
} ).setLayout( {
name: 'mx_perlin_noise_float_1',
type: 'float',
inputs: [
{ name: 'p', type: 'vec3' }
]
} );
// Overload resolved by argument type: vec2 or vec3 sample position.
const mx_perlin_noise_float = /*@__PURE__*/ overloadingFn( [ mx_perlin_noise_float_0, mx_perlin_noise_float_1 ] );
// Vector-valued 2-D Perlin noise: identical lattice construction to the float
// variant, but each corner contributes a vec3 gradient (via mx_hash_vec3 /
// mx_gradient_vec3), yielding three decorrelated channels.
const mx_perlin_noise_vec3_0 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec2( p_immutable ).toVar();
const X = int().toVar(), Y = int().toVar();
const fx = float( mx_floorfrac( p.x, X ) ).toVar();
const fy = float( mx_floorfrac( p.y, Y ) ).toVar();
const u = float( mx_fade( fx ) ).toVar();
const v = float( mx_fade( fy ) ).toVar();
const result = vec3( mx_bilerp( mx_gradient_vec3( mx_hash_vec3( X, Y ), fx, fy ), mx_gradient_vec3( mx_hash_vec3( X.add( int( 1 ) ), Y ), fx.sub( 1.0 ), fy ), mx_gradient_vec3( mx_hash_vec3( X, Y.add( int( 1 ) ) ), fx, fy.sub( 1.0 ) ), mx_gradient_vec3( mx_hash_vec3( X.add( int( 1 ) ), Y.add( int( 1 ) ) ), fx.sub( 1.0 ), fy.sub( 1.0 ) ), u, v ) ).toVar();
return mx_gradient_scale2d( result );
} ).setLayout( {
name: 'mx_perlin_noise_vec3_0',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec2' }
]
} );
// Vector-valued 3-D Perlin noise over the eight cube corners.
const mx_perlin_noise_vec3_1 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec3( p_immutable ).toVar();
const X = int().toVar(), Y = int().toVar(), Z = int().toVar();
const fx = float( mx_floorfrac( p.x, X ) ).toVar();
const fy = float( mx_floorfrac( p.y, Y ) ).toVar();
const fz = float( mx_floorfrac( p.z, Z ) ).toVar();
const u = float( mx_fade( fx ) ).toVar();
const v = float( mx_fade( fy ) ).toVar();
const w = float( mx_fade( fz ) ).toVar();
const result = vec3( mx_trilerp( mx_gradient_vec3( mx_hash_vec3( X, Y, Z ), fx, fy, fz ), mx_gradient_vec3( mx_hash_vec3( X.add( int( 1 ) ), Y, Z ), fx.sub( 1.0 ), fy, fz ), mx_gradient_vec3( mx_hash_vec3( X, Y.add( int( 1 ) ), Z ), fx, fy.sub( 1.0 ), fz ), mx_gradient_vec3( mx_hash_vec3( X.add( int( 1 ) ), Y.add( int( 1 ) ), Z ), fx.sub( 1.0 ), fy.sub( 1.0 ), fz ), mx_gradient_vec3( mx_hash_vec3( X, Y, Z.add( int( 1 ) ) ), fx, fy, fz.sub( 1.0 ) ), mx_gradient_vec3( mx_hash_vec3( X.add( int( 1 ) ), Y, Z.add( int( 1 ) ) ), fx.sub( 1.0 ), fy, fz.sub( 1.0 ) ), mx_gradient_vec3( mx_hash_vec3( X, Y.add( int( 1 ) ), Z.add( int( 1 ) ) ), fx, fy.sub( 1.0 ), fz.sub( 1.0 ) ), mx_gradient_vec3( mx_hash_vec3( X.add( int( 1 ) ), Y.add( int( 1 ) ), Z.add( int( 1 ) ) ), fx.sub( 1.0 ), fy.sub( 1.0 ), fz.sub( 1.0 ) ), u, v, w ) ).toVar();
return mx_gradient_scale3d( result );
} ).setLayout( {
name: 'mx_perlin_noise_vec3_1',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec3' }
]
} );
// Overload resolved by argument type: vec2 or vec3 sample position.
const mx_perlin_noise_vec3 = /*@__PURE__*/ overloadingFn( [ mx_perlin_noise_vec3_0, mx_perlin_noise_vec3_1 ] );
// Cell (value) noise: a constant pseudo-random value in [0, 1] per unit lattice
// cell, obtained by hashing the floored coordinates. 1-D variant.
const mx_cell_noise_float_0 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = float( p_immutable ).toVar();
const ix = int( mx_floor( p ) ).toVar();
return mx_bits_to_01( mx_hash_int( ix ) );
} ).setLayout( {
name: 'mx_cell_noise_float_0',
type: 'float',
inputs: [
{ name: 'p', type: 'float' }
]
} );
// 2-D cell noise variant.
const mx_cell_noise_float_1 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec2( p_immutable ).toVar();
const ix = int( mx_floor( p.x ) ).toVar();
const iy = int( mx_floor( p.y ) ).toVar();
return mx_bits_to_01( mx_hash_int( ix, iy ) );
} ).setLayout( {
name: 'mx_cell_noise_float_1',
type: 'float',
inputs: [
{ name: 'p', type: 'vec2' }
]
} );
// 3-D cell noise variant.
const mx_cell_noise_float_2 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec3( p_immutable ).toVar();
const ix = int( mx_floor( p.x ) ).toVar();
const iy = int( mx_floor( p.y ) ).toVar();
const iz = int( mx_floor( p.z ) ).toVar();
return mx_bits_to_01( mx_hash_int( ix, iy, iz ) );
} ).setLayout( {
name: 'mx_cell_noise_float_2',
type: 'float',
inputs: [
{ name: 'p', type: 'vec3' }
]
} );
// 4-D cell noise variant.
const mx_cell_noise_float_3 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec4( p_immutable ).toVar();
const ix = int( mx_floor( p.x ) ).toVar();
const iy = int( mx_floor( p.y ) ).toVar();
const iz = int( mx_floor( p.z ) ).toVar();
const iw = int( mx_floor( p.w ) ).toVar();
return mx_bits_to_01( mx_hash_int( ix, iy, iz, iw ) );
} ).setLayout( {
name: 'mx_cell_noise_float_3',
type: 'float',
inputs: [
{ name: 'p', type: 'vec4' }
]
} );
// Overload resolved by argument type: float, vec2, vec3 or vec4 position.
const mx_cell_noise_float$1 = /*@__PURE__*/ overloadingFn( [ mx_cell_noise_float_0, mx_cell_noise_float_1, mx_cell_noise_float_2, mx_cell_noise_float_3 ] );
// Vector-valued cell noise: three channels built by appending an extra hash
// coordinate 0 / 1 / 2 to the floored lattice position. 1-D variant.
const mx_cell_noise_vec3_0 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = float( p_immutable ).toVar();
const ix = int( mx_floor( p ) ).toVar();
return vec3( mx_bits_to_01( mx_hash_int( ix, int( 0 ) ) ), mx_bits_to_01( mx_hash_int( ix, int( 1 ) ) ), mx_bits_to_01( mx_hash_int( ix, int( 2 ) ) ) );
} ).setLayout( {
name: 'mx_cell_noise_vec3_0',
type: 'vec3',
inputs: [
{ name: 'p', type: 'float' }
]
} );
// 2-D variant.
const mx_cell_noise_vec3_1 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec2( p_immutable ).toVar();
const ix = int( mx_floor( p.x ) ).toVar();
const iy = int( mx_floor( p.y ) ).toVar();
return vec3( mx_bits_to_01( mx_hash_int( ix, iy, int( 0 ) ) ), mx_bits_to_01( mx_hash_int( ix, iy, int( 1 ) ) ), mx_bits_to_01( mx_hash_int( ix, iy, int( 2 ) ) ) );
} ).setLayout( {
name: 'mx_cell_noise_vec3_1',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec2' }
]
} );
// 3-D variant.
const mx_cell_noise_vec3_2 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec3( p_immutable ).toVar();
const ix = int( mx_floor( p.x ) ).toVar();
const iy = int( mx_floor( p.y ) ).toVar();
const iz = int( mx_floor( p.z ) ).toVar();
return vec3( mx_bits_to_01( mx_hash_int( ix, iy, iz, int( 0 ) ) ), mx_bits_to_01( mx_hash_int( ix, iy, iz, int( 1 ) ) ), mx_bits_to_01( mx_hash_int( ix, iy, iz, int( 2 ) ) ) );
} ).setLayout( {
name: 'mx_cell_noise_vec3_2',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec3' }
]
} );
// 4-D variant.
const mx_cell_noise_vec3_3 = /*@__PURE__*/ Fn( ( [ p_immutable ] ) => {
const p = vec4( p_immutable ).toVar();
const ix = int( mx_floor( p.x ) ).toVar();
const iy = int( mx_floor( p.y ) ).toVar();
const iz = int( mx_floor( p.z ) ).toVar();
const iw = int( mx_floor( p.w ) ).toVar();
return vec3( mx_bits_to_01( mx_hash_int( ix, iy, iz, iw, int( 0 ) ) ), mx_bits_to_01( mx_hash_int( ix, iy, iz, iw, int( 1 ) ) ), mx_bits_to_01( mx_hash_int( ix, iy, iz, iw, int( 2 ) ) ) );
} ).setLayout( {
name: 'mx_cell_noise_vec3_3',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec4' }
]
} );
// Overload resolved by argument type: float, vec2, vec3 or vec4 position.
const mx_cell_noise_vec3 = /*@__PURE__*/ overloadingFn( [ mx_cell_noise_vec3_0, mx_cell_noise_vec3_1, mx_cell_noise_vec3_2, mx_cell_noise_vec3_3 ] );
// Fractal Brownian motion (float): sums `octaves` layers of 3-D Perlin noise.
// Each octave scales the position by `lacunarity` (frequency growth) and the
// amplitude by `diminish` (gain).
const mx_fractal_noise_float$1 = /*@__PURE__*/ Fn( ( [ p_immutable, octaves_immutable, lacunarity_immutable, diminish_immutable ] ) => {
const diminish = float( diminish_immutable ).toVar();
const lacunarity = float( lacunarity_immutable ).toVar();
const octaves = int( octaves_immutable ).toVar();
const p = vec3( p_immutable ).toVar();
const result = float( 0.0 ).toVar();
const amplitude = float( 1.0 ).toVar();
Loop( octaves, () => {
result.addAssign( amplitude.mul( mx_perlin_noise_float( p ) ) );
amplitude.mulAssign( diminish );
p.mulAssign( lacunarity );
} );
return result;
} ).setLayout( {
name: 'mx_fractal_noise_float',
type: 'float',
inputs: [
{ name: 'p', type: 'vec3' },
{ name: 'octaves', type: 'int' },
{ name: 'lacunarity', type: 'float' },
{ name: 'diminish', type: 'float' }
]
} );
// Fractal Brownian motion (vec3): same accumulation with vector-valued Perlin
// noise per octave.
const mx_fractal_noise_vec3$1 = /*@__PURE__*/ Fn( ( [ p_immutable, octaves_immutable, lacunarity_immutable, diminish_immutable ] ) => {
const diminish = float( diminish_immutable ).toVar();
const lacunarity = float( lacunarity_immutable ).toVar();
const octaves = int( octaves_immutable ).toVar();
const p = vec3( p_immutable ).toVar();
const result = vec3( 0.0 ).toVar();
const amplitude = float( 1.0 ).toVar();
Loop( octaves, () => {
result.addAssign( amplitude.mul( mx_perlin_noise_vec3( p ) ) );
amplitude.mulAssign( diminish );
p.mulAssign( lacunarity );
} );
return result;
} ).setLayout( {
name: 'mx_fractal_noise_vec3',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec3' },
{ name: 'octaves', type: 'int' },
{ name: 'lacunarity', type: 'float' },
{ name: 'diminish', type: 'float' }
]
} );
// Fractal Brownian motion (vec2): second channel is the float fBm evaluated at
// p offset by the constant ( 19, 193, 17 ) to decorrelate it from the first.
const mx_fractal_noise_vec2$1 = /*@__PURE__*/ Fn( ( [ p_immutable, octaves_immutable, lacunarity_immutable, diminish_immutable ] ) => {
const diminish = float( diminish_immutable ).toVar();
const lacunarity = float( lacunarity_immutable ).toVar();
const octaves = int( octaves_immutable ).toVar();
const p = vec3( p_immutable ).toVar();
return vec2( mx_fractal_noise_float$1( p, octaves, lacunarity, diminish ), mx_fractal_noise_float$1( p.add( vec3( int( 19 ), int( 193 ), int( 17 ) ) ), octaves, lacunarity, diminish ) );
} ).setLayout( {
name: 'mx_fractal_noise_vec2',
type: 'vec2',
inputs: [
{ name: 'p', type: 'vec3' },
{ name: 'octaves', type: 'int' },
{ name: 'lacunarity', type: 'float' },
{ name: 'diminish', type: 'float' }
]
} );
// Fractal Brownian motion (vec4): vec3 fBm for rgb plus an offset float fBm for
// the fourth component.
const mx_fractal_noise_vec4$1 = /*@__PURE__*/ Fn( ( [ p_immutable, octaves_immutable, lacunarity_immutable, diminish_immutable ] ) => {
const diminish = float( diminish_immutable ).toVar();
const lacunarity = float( lacunarity_immutable ).toVar();
const octaves = int( octaves_immutable ).toVar();
const p = vec3( p_immutable ).toVar();
const c = vec3( mx_fractal_noise_vec3$1( p, octaves, lacunarity, diminish ) ).toVar();
const f = float( mx_fractal_noise_float$1( p.add( vec3( int( 19 ), int( 193 ), int( 17 ) ) ), octaves, lacunarity, diminish ) ).toVar();
return vec4( c, f );
} ).setLayout( {
name: 'mx_fractal_noise_vec4',
type: 'vec4',
inputs: [
{ name: 'p', type: 'vec3' },
{ name: 'octaves', type: 'int' },
{ name: 'lacunarity', type: 'float' },
{ name: 'diminish', type: 'float' }
]
} );
// Distance from sample point `p` (cell-local coordinates) to the jittered
// feature point of lattice cell ( x + xoff, y + yoff ). The feature point is
// derived from cell noise and pulled toward the cell center by `jitter`.
// metric 2 = Manhattan (L1), metric 3 = Chebyshev (L-inf), otherwise squared
// Euclidean. 2-D variant.
const mx_worley_distance_0 = /*@__PURE__*/ Fn( ( [ p_immutable, x_immutable, y_immutable, xoff_immutable, yoff_immutable, jitter_immutable, metric_immutable ] ) => {
const metric = int( metric_immutable ).toVar();
const jitter = float( jitter_immutable ).toVar();
const yoff = int( yoff_immutable ).toVar();
const xoff = int( xoff_immutable ).toVar();
const y = int( y_immutable ).toVar();
const x = int( x_immutable ).toVar();
const p = vec2( p_immutable ).toVar();
const tmp = vec3( mx_cell_noise_vec3( vec2( x.add( xoff ), y.add( yoff ) ) ) ).toVar();
const off = vec2( tmp.x, tmp.y ).toVar();
// remap offset from [0,1] around the cell center and apply the jitter amount
off.subAssign( 0.5 );
off.mulAssign( jitter );
off.addAssign( 0.5 );
const cellpos = vec2( vec2( float( x ), float( y ) ).add( off ) ).toVar();
const diff = vec2( cellpos.sub( p ) ).toVar();
// metric 2: Manhattan (L1) distance
If( metric.equal( int( 2 ) ), () => {
return abs( diff.x ).add( abs( diff.y ) );
} );
// metric 3: Chebyshev (L-infinity) distance
If( metric.equal( int( 3 ) ), () => {
return max$1( abs( diff.x ), abs( diff.y ) );
} );
// default: squared Euclidean distance (sqrt applied by the caller for metric 0)
return dot( diff, diff );
} ).setLayout( {
name: 'mx_worley_distance_0',
type: 'float',
inputs: [
{ name: 'p', type: 'vec2' },
{ name: 'x', type: 'int' },
{ name: 'y', type: 'int' },
{ name: 'xoff', type: 'int' },
{ name: 'yoff', type: 'int' },
{ name: 'jitter', type: 'float' },
{ name: 'metric', type: 'int' }
]
} );
// 3-D variant of the same cell feature-point distance.
const mx_worley_distance_1 = /*@__PURE__*/ Fn( ( [ p_immutable, x_immutable, y_immutable, z_immutable, xoff_immutable, yoff_immutable, zoff_immutable, jitter_immutable, metric_immutable ] ) => {
const metric = int( metric_immutable ).toVar();
const jitter = float( jitter_immutable ).toVar();
const zoff = int( zoff_immutable ).toVar();
const yoff = int( yoff_immutable ).toVar();
const xoff = int( xoff_immutable ).toVar();
const z = int( z_immutable ).toVar();
const y = int( y_immutable ).toVar();
const x = int( x_immutable ).toVar();
const p = vec3( p_immutable ).toVar();
const off = vec3( mx_cell_noise_vec3( vec3( x.add( xoff ), y.add( yoff ), z.add( zoff ) ) ) ).toVar();
off.subAssign( 0.5 );
off.mulAssign( jitter );
off.addAssign( 0.5 );
const cellpos = vec3( vec3( float( x ), float( y ), float( z ) ).add( off ) ).toVar();
const diff = vec3( cellpos.sub( p ) ).toVar();
// metric 2: Manhattan (L1) distance
If( metric.equal( int( 2 ) ), () => {
return abs( diff.x ).add( abs( diff.y ) ).add( abs( diff.z ) );
} );
// metric 3: Chebyshev (L-infinity) distance
If( metric.equal( int( 3 ) ), () => {
return max$1( max$1( abs( diff.x ), abs( diff.y ) ), abs( diff.z ) );
} );
// default: squared Euclidean distance
return dot( diff, diff );
} ).setLayout( {
name: 'mx_worley_distance_1',
type: 'float',
inputs: [
{ name: 'p', type: 'vec3' },
{ name: 'x', type: 'int' },
{ name: 'y', type: 'int' },
{ name: 'z', type: 'int' },
{ name: 'xoff', type: 'int' },
{ name: 'yoff', type: 'int' },
{ name: 'zoff', type: 'int' },
{ name: 'jitter', type: 'float' },
{ name: 'metric', type: 'int' }
]
} );
// Overload resolved by arity: 2-D or 3-D cell coordinates.
const mx_worley_distance = /*@__PURE__*/ overloadingFn( [ mx_worley_distance_0, mx_worley_distance_1 ] );
// 2-D Worley (cellular) noise, F1: scans the 3x3 cell neighborhood around `p`
// and keeps the smallest feature-point distance. For metric 0 the tracked
// squared distance is converted to Euclidean at the end.
const mx_worley_noise_float_0 = /*@__PURE__*/ Fn( ( [ p_immutable, jitter_immutable, metric_immutable ] ) => {
const metric = int( metric_immutable ).toVar();
const jitter = float( jitter_immutable ).toVar();
const p = vec2( p_immutable ).toVar();
const X = int().toVar(), Y = int().toVar();
const localpos = vec2( mx_floorfrac( p.x, X ), mx_floorfrac( p.y, Y ) ).toVar();
const sqdist = float( 1e6 ).toVar();
Loop( { start: - 1, end: int( 1 ), name: 'x', condition: '<=' }, ( { x } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'y', condition: '<=' }, ( { y } ) => {
const dist = float( mx_worley_distance( localpos, x, y, X, Y, jitter, metric ) ).toVar();
sqdist.assign( min$1( sqdist, dist ) );
} );
} );
If( metric.equal( int( 0 ) ), () => {
sqdist.assign( sqrt( sqdist ) );
} );
return sqdist;
} ).setLayout( {
name: 'mx_worley_noise_float_0',
type: 'float',
inputs: [
{ name: 'p', type: 'vec2' },
{ name: 'jitter', type: 'float' },
{ name: 'metric', type: 'int' }
]
} );
// 2-D Worley noise, ( F1, F2 ): tracks the two smallest neighborhood distances
// with a two-slot insertion sort ( sqdist.x <= sqdist.y ).
const mx_worley_noise_vec2_0 = /*@__PURE__*/ Fn( ( [ p_immutable, jitter_immutable, metric_immutable ] ) => {
const metric = int( metric_immutable ).toVar();
const jitter = float( jitter_immutable ).toVar();
const p = vec2( p_immutable ).toVar();
const X = int().toVar(), Y = int().toVar();
const localpos = vec2( mx_floorfrac( p.x, X ), mx_floorfrac( p.y, Y ) ).toVar();
const sqdist = vec2( 1e6, 1e6 ).toVar();
Loop( { start: - 1, end: int( 1 ), name: 'x', condition: '<=' }, ( { x } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'y', condition: '<=' }, ( { y } ) => {
const dist = float( mx_worley_distance( localpos, x, y, X, Y, jitter, metric ) ).toVar();
If( dist.lessThan( sqdist.x ), () => {
sqdist.y.assign( sqdist.x );
sqdist.x.assign( dist );
} ).ElseIf( dist.lessThan( sqdist.y ), () => {
sqdist.y.assign( dist );
} );
} );
} );
// metric 0: convert squared distances to Euclidean
If( metric.equal( int( 0 ) ), () => {
sqdist.assign( sqrt( sqdist ) );
} );
return sqdist;
} ).setLayout( {
name: 'mx_worley_noise_vec2_0',
type: 'vec2',
inputs: [
{ name: 'p', type: 'vec2' },
{ name: 'jitter', type: 'float' },
{ name: 'metric', type: 'int' }
]
} );
// 2-D Worley noise, ( F1, F2, F3 ): tracks the three smallest neighborhood
// distances with a three-slot insertion sort ( sqdist.x <= sqdist.y <= sqdist.z ).
const mx_worley_noise_vec3_0 = /*@__PURE__*/ Fn( ( [ p_immutable, jitter_immutable, metric_immutable ] ) => {
const metric = int( metric_immutable ).toVar();
const jitter = float( jitter_immutable ).toVar();
const p = vec2( p_immutable ).toVar();
const X = int().toVar(), Y = int().toVar();
const localpos = vec2( mx_floorfrac( p.x, X ), mx_floorfrac( p.y, Y ) ).toVar();
const sqdist = vec3( 1e6, 1e6, 1e6 ).toVar();
Loop( { start: - 1, end: int( 1 ), name: 'x', condition: '<=' }, ( { x } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'y', condition: '<=' }, ( { y } ) => {
const dist = float( mx_worley_distance( localpos, x, y, X, Y, jitter, metric ) ).toVar();
If( dist.lessThan( sqdist.x ), () => {
sqdist.z.assign( sqdist.y );
sqdist.y.assign( sqdist.x );
sqdist.x.assign( dist );
} ).ElseIf( dist.lessThan( sqdist.y ), () => {
sqdist.z.assign( sqdist.y );
sqdist.y.assign( dist );
} ).ElseIf( dist.lessThan( sqdist.z ), () => {
sqdist.z.assign( dist );
} );
} );
} );
// metric 0: convert squared distances to Euclidean
If( metric.equal( int( 0 ) ), () => {
sqdist.assign( sqrt( sqdist ) );
} );
return sqdist;
} ).setLayout( {
name: 'mx_worley_noise_vec3_0',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec2' },
{ name: 'jitter', type: 'float' },
{ name: 'metric', type: 'int' }
]
} );
// 3D Worley (cellular) noise returning the nearest feature-point distance
// (F1 only). Transpiled from the MaterialX stdlib noise library.
const mx_worley_noise_float_1 = /*@__PURE__*/ Fn( ( [ p_immutable, jitter_immutable, metric_immutable ] ) => {
const metric = int( metric_immutable ).toVar();
const jitter = float( jitter_immutable ).toVar();
const p = vec3( p_immutable ).toVar();
// X/Y/Z receive the integer cell coordinates; localpos is the fractional part.
const X = int().toVar(), Y = int().toVar(), Z = int().toVar();
const localpos = vec3( mx_floorfrac( p.x, X ), mx_floorfrac( p.y, Y ), mx_floorfrac( p.z, Z ) ).toVar();
const sqdist = float( 1e6 ).toVar();
// Scan the 3x3x3 neighborhood of cells around the sample point.
Loop( { start: - 1, end: int( 1 ), name: 'x', condition: '<=' }, ( { x } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'y', condition: '<=' }, ( { y } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'z', condition: '<=' }, ( { z } ) => {
const dist = float( mx_worley_distance( localpos, x, y, z, X, Y, Z, jitter, metric ) ).toVar();
// Keep only the smallest distance seen so far.
sqdist.assign( min$1( sqdist, dist ) );
} );
} );
} );
// For metric 0 the accumulated value is a squared distance, so take the root.
If( metric.equal( int( 0 ) ), () => {
sqdist.assign( sqrt( sqdist ) );
} );
return sqdist;
} ).setLayout( {
name: 'mx_worley_noise_float_1',
type: 'float',
inputs: [
{ name: 'p', type: 'vec3' },
{ name: 'jitter', type: 'float' },
{ name: 'metric', type: 'int' }
]
} );
// Overload dispatcher: selects the 2D (vec2 input) or 3D (vec3 input) variant.
const mx_worley_noise_float$1 = /*@__PURE__*/ overloadingFn( [ mx_worley_noise_float_0, mx_worley_noise_float_1 ] );
// 3D Worley (cellular) noise returning the two smallest feature-point
// distances (F1, F2). Transpiled from the MaterialX stdlib noise library.
const mx_worley_noise_vec2_1 = /*@__PURE__*/ Fn( ( [ p_immutable, jitter_immutable, metric_immutable ] ) => {
const metric = int( metric_immutable ).toVar();
const jitter = float( jitter_immutable ).toVar();
const p = vec3( p_immutable ).toVar();
// X/Y/Z receive the integer cell coordinates; localpos is the fractional part.
const X = int().toVar(), Y = int().toVar(), Z = int().toVar();
const localpos = vec3( mx_floorfrac( p.x, X ), mx_floorfrac( p.y, Y ), mx_floorfrac( p.z, Z ) ).toVar();
// sqdist.x tracks the nearest distance (F1), sqdist.y the second nearest (F2).
const sqdist = vec2( 1e6, 1e6 ).toVar();
// Scan the 3x3x3 neighborhood of cells around the sample point.
Loop( { start: - 1, end: int( 1 ), name: 'x', condition: '<=' }, ( { x } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'y', condition: '<=' }, ( { y } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'z', condition: '<=' }, ( { z } ) => {
const dist = float( mx_worley_distance( localpos, x, y, z, X, Y, Z, jitter, metric ) ).toVar();
// Insert `dist` into the two smallest distances found so far.
If( dist.lessThan( sqdist.x ), () => {
sqdist.y.assign( sqdist.x );
sqdist.x.assign( dist );
} ).ElseIf( dist.lessThan( sqdist.y ), () => {
sqdist.y.assign( dist );
} );
} );
} );
} );
// For metric 0 the accumulated values are squared distances, so take the root.
If( metric.equal( int( 0 ) ), () => {
sqdist.assign( sqrt( sqdist ) );
} );
return sqdist;
} ).setLayout( {
name: 'mx_worley_noise_vec2_1',
type: 'vec2',
inputs: [
{ name: 'p', type: 'vec3' },
{ name: 'jitter', type: 'float' },
{ name: 'metric', type: 'int' }
]
} );
// Overload dispatcher: selects the 2D (vec2 input) or 3D (vec3 input) variant.
const mx_worley_noise_vec2$1 = /*@__PURE__*/ overloadingFn( [ mx_worley_noise_vec2_0, mx_worley_noise_vec2_1 ] );
// 3D Worley (cellular) noise returning the three smallest feature-point
// distances (F1, F2, F3). Transpiled from the MaterialX stdlib noise library.
const mx_worley_noise_vec3_1 = /*@__PURE__*/ Fn( ( [ p_immutable, jitter_immutable, metric_immutable ] ) => {
const metric = int( metric_immutable ).toVar();
const jitter = float( jitter_immutable ).toVar();
const p = vec3( p_immutable ).toVar();
// X/Y/Z receive the integer cell coordinates; localpos is the fractional part.
const X = int().toVar(), Y = int().toVar(), Z = int().toVar();
const localpos = vec3( mx_floorfrac( p.x, X ), mx_floorfrac( p.y, Y ), mx_floorfrac( p.z, Z ) ).toVar();
// sqdist.x/.y/.z track the three smallest distances (F1 <= F2 <= F3).
const sqdist = vec3( 1e6, 1e6, 1e6 ).toVar();
// Scan the 3x3x3 neighborhood of cells around the sample point.
Loop( { start: - 1, end: int( 1 ), name: 'x', condition: '<=' }, ( { x } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'y', condition: '<=' }, ( { y } ) => {
Loop( { start: - 1, end: int( 1 ), name: 'z', condition: '<=' }, ( { z } ) => {
const dist = float( mx_worley_distance( localpos, x, y, z, X, Y, Z, jitter, metric ) ).toVar();
// Insert `dist` into the sorted triple of smallest distances.
If( dist.lessThan( sqdist.x ), () => {
sqdist.z.assign( sqdist.y );
sqdist.y.assign( sqdist.x );
sqdist.x.assign( dist );
} ).ElseIf( dist.lessThan( sqdist.y ), () => {
sqdist.z.assign( sqdist.y );
sqdist.y.assign( dist );
} ).ElseIf( dist.lessThan( sqdist.z ), () => {
sqdist.z.assign( dist );
} );
} );
} );
} );
// For metric 0 the accumulated values are squared distances, so take the root.
If( metric.equal( int( 0 ) ), () => {
sqdist.assign( sqrt( sqdist ) );
} );
return sqdist;
} ).setLayout( {
name: 'mx_worley_noise_vec3_1',
type: 'vec3',
inputs: [
{ name: 'p', type: 'vec3' },
{ name: 'jitter', type: 'float' },
{ name: 'metric', type: 'int' }
]
} );
// Overload dispatcher: selects the 2D (vec2 input) or 3D (vec3 input) variant.
const mx_worley_noise_vec3$1 = /*@__PURE__*/ overloadingFn( [ mx_worley_noise_vec3_0, mx_worley_noise_vec3_1 ] );
// Three.js Transpiler
// https://github.com/AcademySoftwareFoundation/MaterialX/blob/main/libraries/stdlib/genglsl/lib/mx_hsv.glsl
// Converts an HSV color (hue, saturation, value) to RGB.
// Transpiled from the MaterialX stdlib mx_hsv.glsl library.
const mx_hsvtorgb = /*@__PURE__*/ Fn( ( [ hsv ] ) => {
const s = hsv.y;
const v = hsv.z;
const result = vec3().toVar();
// Achromatic case: with (near-)zero saturation the result is pure gray.
If( s.lessThan( 0.0001 ), () => {
result.assign( vec3( v, v, v ) );
} ).Else( () => {
let h = hsv.x;
// Wrap hue into [0, 6): the integer part selects one of six color sextants.
h = h.sub( floor( h ) ).mul( 6.0 ).toVar(); // TODO: check what .toVar() is needed in node system cache
const hi = int( trunc( h ) );
// f is the fractional position within the sextant.
const f = h.sub( float( hi ) );
// Standard HSV intermediates used to build the RGB permutations.
const p = v.mul( s.oneMinus() );
const q = v.mul( s.mul( f ).oneMinus() );
const t = v.mul( s.mul( f.oneMinus() ).oneMinus() );
// Select the RGB channel permutation for the active sextant.
If( hi.equal( int( 0 ) ), () => {
result.assign( vec3( v, t, p ) );
} ).ElseIf( hi.equal( int( 1 ) ), () => {
result.assign( vec3( q, v, p ) );
} ).ElseIf( hi.equal( int( 2 ) ), () => {
result.assign( vec3( p, v, t ) );
} ).ElseIf( hi.equal( int( 3 ) ), () => {
result.assign( vec3( p, q, v ) );
} ).ElseIf( hi.equal( int( 4 ) ), () => {
result.assign( vec3( t, p, v ) );
} ).Else( () => {
result.assign( vec3( v, p, q ) );
} );
} );
return result;
} ).setLayout( {
name: 'mx_hsvtorgb',
type: 'vec3',
inputs: [
{ name: 'hsv', type: 'vec3' }
]
} );
// Converts an RGB color to HSV (hue, saturation, value).
// Transpiled from the MaterialX stdlib mx_hsv.glsl library.
const mx_rgbtohsv = /*@__PURE__*/ Fn( ( [ c_immutable ] ) => {
const c = vec3( c_immutable ).toVar();
const r = float( c.x ).toVar();
const g = float( c.y ).toVar();
const b = float( c.z ).toVar();
// Channel extrema and their spread drive value, saturation and hue.
const mincomp = float( min$1( r, min$1( g, b ) ) ).toVar();
const maxcomp = float( max$1( r, max$1( g, b ) ) ).toVar();
const delta = float( maxcomp.sub( mincomp ) ).toVar();
const h = float().toVar(), s = float().toVar(), v = float().toVar();
// Value is the largest channel.
v.assign( maxcomp );
// Saturation is the relative spread; zero for black to avoid division by zero.
If( maxcomp.greaterThan( 0.0 ), () => {
s.assign( delta.div( maxcomp ) );
} ).Else( () => {
s.assign( 0.0 );
} );
// With no saturation the hue is undefined; report 0.
If( s.lessThanEqual( 0.0 ), () => {
h.assign( 0.0 );
} ).Else( () => {
// Hue depends on which channel dominates; each case offsets by two sextants.
If( r.greaterThanEqual( maxcomp ), () => {
h.assign( g.sub( b ).div( delta ) );
} ).ElseIf( g.greaterThanEqual( maxcomp ), () => {
h.assign( add( 2.0, b.sub( r ).div( delta ) ) );
} ).Else( () => {
h.assign( add( 4.0, r.sub( g ).div( delta ) ) );
} );
// Scale from sextants [0, 6) to [0, 1) and wrap negatives.
h.mulAssign( 1.0 / 6.0 );
If( h.lessThan( 0.0 ), () => {
h.addAssign( 1.0 );
} );
} );
return vec3( h, s, v );
} ).setLayout( {
name: 'mx_rgbtohsv',
type: 'vec3',
inputs: [
{ name: 'c', type: 'vec3' }
]
} );
// Three.js Transpiler
// https://github.com/AcademySoftwareFoundation/MaterialX/blob/main/libraries/stdlib/genglsl/lib/mx_transform_color.glsl
// Decodes an sRGB-encoded color to linear Rec.709, per the IEC 61966-2-1
// transfer function: a linear segment below 0.04045 and a 2.4 power segment
// above it, selected per channel.
const mx_srgb_texture_to_lin_rec709 = /*@__PURE__*/ Fn( ( [ color_immutable ] ) => {
const color = vec3( color_immutable ).toVar();
// Per-channel mask: true where the power segment of the curve applies.
const isAbove = bvec3( greaterThan( color, vec3( 0.04045 ) ) ).toVar();
const linSeg = vec3( color.div( 12.92 ) ).toVar();
// max() guards against negative inputs before the fractional power.
const powSeg = vec3( pow( max$1( color.add( vec3( 0.055 ) ), vec3( 0.0 ) ).div( 1.055 ), vec3( 2.4 ) ) ).toVar();
return mix( linSeg, powSeg, isAbove );
} ).setLayout( {
name: 'mx_srgb_texture_to_lin_rec709',
type: 'vec3',
inputs: [
{ name: 'color', type: 'vec3' }
]
} );
/**
 * Anti-aliased step: behaves like `step( threshold, value )` but smooths the
 * transition over an estimated pixel footprint derived from screen-space
 * derivatives of the input, which reduces aliasing at the edge.
 */
const mx_aastep = ( threshold, value ) => {

	const t = float( threshold );
	const v = float( value );

	// Filter half-width: gradient magnitude of the input scaled by 1/sqrt(2).
	const afwidth = vec2( v.dFdx(), v.dFdy() ).length().mul( 0.70710678118654757 );

	return smoothstep( t.sub( afwidth ), t.add( afwidth ), v );

};
// Linear blend between two values driven by one clamped texcoord axis.
const _ramp = ( a, b, coord, axis ) => mix( a, b, coord[ axis ].clamp() );

// Horizontal (left-to-right) and vertical (top-to-bottom) linear ramps.
const mx_ramplr = ( valuel, valuer, texcoord = uv() ) => _ramp( valuel, valuer, texcoord, 'x' );
const mx_ramptb = ( valuet, valueb, texcoord = uv() ) => _ramp( valuet, valueb, texcoord, 'y' );

// Anti-aliased hard split between two values at `center` along one axis.
const _split = ( a, b, center, coord, axis ) => mix( a, b, mx_aastep( center, coord[ axis ] ) );

// Horizontal and vertical splits.
const mx_splitlr = ( valuel, valuer, center, texcoord = uv() ) => _split( valuel, valuer, center, texcoord, 'x' );
const mx_splittb = ( valuet, valueb, center, texcoord = uv() ) => _split( valuet, valueb, center, texcoord, 'y' );

// Applies a scale followed by an offset to a texture coordinate.
const mx_transform_uv = ( uv_scale = 1, uv_offset = 0, uv_geo = uv() ) => {

	return uv_geo.mul( uv_scale ).add( uv_offset );

};

// Power function that tolerates negative bases: |in1| ^ in2 with the sign of
// in1 restored on the result.
const mx_safepower = ( in1, in2 = 1 ) => {

	const base = float( in1 );
	return base.abs().pow( in2 ).mul( base.sign() );

};

// Scales the distance of `input` from `pivot` to raise or lower contrast.
const mx_contrast = ( input, amount = 1, pivot = .5 ) => {

	return float( input ).sub( pivot ).mul( amount ).add( pivot );

};
// Perlin noise wrappers: amplitude * noise( texcoord ) + pivot.
const mx_noise_float = ( texcoord = uv(), amplitude = 1, pivot = 0 ) => {

	const n = mx_perlin_noise_float( texcoord.convert( 'vec2|vec3' ) );
	return n.mul( amplitude ).add( pivot );

};
//export const mx_noise_vec2 = ( texcoord = uv(), amplitude = 1, pivot = 0 ) => mx_perlin_noise_vec3( texcoord.convert( 'vec2|vec3' ) ).mul( amplitude ).add( pivot );
const mx_noise_vec3 = ( texcoord = uv(), amplitude = 1, pivot = 0 ) => {

	const n = mx_perlin_noise_vec3( texcoord.convert( 'vec2|vec3' ) );
	return n.mul( amplitude ).add( pivot );

};
const mx_noise_vec4 = ( texcoord = uv(), amplitude = 1, pivot = 0 ) => {

	const coord = texcoord.convert( 'vec2|vec3' ); // overloading type

	// Fourth channel is an independent scalar noise sampled at an offset.
	const rgb = mx_perlin_noise_vec3( coord );
	const a = mx_perlin_noise_float( coord.add( vec2( 19, 73 ) ) );

	return vec4( rgb, a ).mul( amplitude ).add( pivot );

};

// Worley (cellular) noise wrappers; metric is fixed to 1 here.
const mx_worley_noise_float = ( texcoord = uv(), jitter = 1 ) => {

	return mx_worley_noise_float$1( texcoord.convert( 'vec2|vec3' ), jitter, int( 1 ) );

};
const mx_worley_noise_vec2 = ( texcoord = uv(), jitter = 1 ) => {

	return mx_worley_noise_vec2$1( texcoord.convert( 'vec2|vec3' ), jitter, int( 1 ) );

};
const mx_worley_noise_vec3 = ( texcoord = uv(), jitter = 1 ) => {

	return mx_worley_noise_vec3$1( texcoord.convert( 'vec2|vec3' ), jitter, int( 1 ) );

};

// Cell noise wrapper.
const mx_cell_noise_float = ( texcoord = uv() ) => {

	return mx_cell_noise_float$1( texcoord.convert( 'vec2|vec3' ) );

};

// Fractal (fBm) noise wrappers: octave sum scaled by amplitude.
const mx_fractal_noise_float = ( position = uv(), octaves = 3, lacunarity = 2, diminish = .5, amplitude = 1 ) => {

	return mx_fractal_noise_float$1( position, int( octaves ), lacunarity, diminish ).mul( amplitude );

};
const mx_fractal_noise_vec2 = ( position = uv(), octaves = 3, lacunarity = 2, diminish = .5, amplitude = 1 ) => {

	return mx_fractal_noise_vec2$1( position, int( octaves ), lacunarity, diminish ).mul( amplitude );

};
const mx_fractal_noise_vec3 = ( position = uv(), octaves = 3, lacunarity = 2, diminish = .5, amplitude = 1 ) => {

	return mx_fractal_noise_vec3$1( position, int( octaves ), lacunarity, diminish ).mul( amplitude );

};
const mx_fractal_noise_vec4 = ( position = uv(), octaves = 3, lacunarity = 2, diminish = .5, amplitude = 1 ) => {

	return mx_fractal_noise_vec4$1( position, int( octaves ), lacunarity, diminish ).mul( amplitude );

};
/** @module getParallaxCorrectNormal **/
/**
* This computes a parallax corrected normal which is used for box-projected cube mapping (BPCEM).
*
* Reference: {@link https://devlog-martinsh.blogspot.com/2011/09/box-projected-cube-environment-mapping.html}
*
* ```js
* const uvNode = getParallaxCorrectNormal( reflectVector, vec3( 200, 100, 100 ), vec3( 0, - 50, 0 ) );
* material.envNode = pmremTexture( renderTarget.texture, uvNode );
* ```
* @function
* @param {Node<vec3>} normal - The normal to correct.
* @param {Node<vec3>} cubeSize - The cube size should reflect the size of the environment (BPCEM is usually applied in closed environments like rooms).
* @param {Node<vec3>} cubePos - The cube position.
* @return {Node<vec3>} The parallax corrected normal.
*/
const getParallaxCorrectNormal = /*@__PURE__*/ Fn( ( [ normal, cubeSize, cubePos ] ) => {
const nDir = normalize( normal ).toVar( 'nDir' );
// Per-axis parametric distances from the fragment to the box's positive
// (rbmax) and negative (rbmin) face planes along the ray direction nDir.
const rbmax = sub( float( 0.5 ).mul( cubeSize.sub( cubePos ) ), positionWorld ).div( nDir ).toVar( 'rbmax' );
const rbmin = sub( float( - 0.5 ).mul( cubeSize.sub( cubePos ) ), positionWorld ).div( nDir ).toVar( 'rbmin' );
// For each axis, keep the face plane lying in the direction of travel.
const rbminmax = vec3().toVar( 'rbminmax' );
rbminmax.x = nDir.x.greaterThan( float( 0 ) ).select( rbmax.x, rbmin.x );
rbminmax.y = nDir.y.greaterThan( float( 0 ) ).select( rbmax.y, rbmin.y );
rbminmax.z = nDir.z.greaterThan( float( 0 ) ).select( rbmax.z, rbmin.z );
// The nearest of the three plane hits is the ray's exit point from the box.
const correction = min$1( min$1( rbminmax.x, rbminmax.y ), rbminmax.z ).toVar( 'correction' );
const boxIntersection = positionWorld.add( nDir.mul( correction ) ).toVar( 'boxIntersection' );
// Re-express the intersection relative to the cube position to get the
// corrected lookup direction.
return boxIntersection.sub( cubePos );
} );
// Evaluates irradiance in direction `normal` from 9 spherical-harmonics
// coefficients (bands 0-2). The constants are the standard SH irradiance
// convolution weights (cf. Ramamoorthi & Hanrahan).
const getShIrradianceAt = /*@__PURE__*/ Fn( ( [ normal, shCoefficients ] ) => {
// normal is assumed to have unit length
const x = normal.x, y = normal.y, z = normal.z;
// band 0
let result = shCoefficients.element( 0 ).mul( 0.886227 );
// band 1
result = result.add( shCoefficients.element( 1 ).mul( 2.0 * 0.511664 ).mul( y ) );
result = result.add( shCoefficients.element( 2 ).mul( 2.0 * 0.511664 ).mul( z ) );
result = result.add( shCoefficients.element( 3 ).mul( 2.0 * 0.511664 ).mul( x ) );
// band 2
result = result.add( shCoefficients.element( 4 ).mul( 2.0 * 0.429043 ).mul( x ).mul( y ) );
result = result.add( shCoefficients.element( 5 ).mul( 2.0 * 0.429043 ).mul( y ).mul( z ) );
result = result.add( shCoefficients.element( 6 ).mul( z.mul( z ).mul( 0.743125 ).sub( 0.247708 ) ) );
result = result.add( shCoefficients.element( 7 ).mul( 2.0 * 0.429043 ).mul( x ).mul( z ) );
result = result.add( shCoefficients.element( 8 ).mul( 0.429043 ).mul( mul( x, x ).sub( mul( y, y ) ) ) );
return result;
} );
// constants
var TSL = /*#__PURE__*/Object.freeze({
__proto__: null,
BRDF_GGX: BRDF_GGX,
BRDF_Lambert: BRDF_Lambert,
BasicShadowFilter: BasicShadowFilter,
Break: Break,
Const: Const,
Continue: Continue,
DFGApprox: DFGApprox,
D_GGX: D_GGX,
Discard: Discard,
EPSILON: EPSILON,
F_Schlick: F_Schlick,
Fn: Fn,
INFINITY: INFINITY,
If: If,
Loop: Loop,
NodeAccess: NodeAccess,
NodeShaderStage: NodeShaderStage,
NodeType: NodeType,
NodeUpdateType: NodeUpdateType,
PCFShadowFilter: PCFShadowFilter,
PCFSoftShadowFilter: PCFSoftShadowFilter,
PI: PI,
PI2: PI2,
Return: Return,
Schlick_to_F0: Schlick_to_F0,
ScriptableNodeResources: ScriptableNodeResources,
ShaderNode: ShaderNode,
TBNViewMatrix: TBNViewMatrix,
VSMShadowFilter: VSMShadowFilter,
V_GGX_SmithCorrelated: V_GGX_SmithCorrelated,
Var: Var,
abs: abs,
acesFilmicToneMapping: acesFilmicToneMapping,
acos: acos,
add: add,
addMethodChaining: addMethodChaining,
addNodeElement: addNodeElement,
agxToneMapping: agxToneMapping,
all: all,
alphaT: alphaT,
and: and,
anisotropy: anisotropy,
anisotropyB: anisotropyB,
anisotropyT: anisotropyT,
any: any,
append: append,
array: array,
arrayBuffer: arrayBuffer,
asin: asin,
assign: assign,
atan: atan,
atan2: atan2,
atomicAdd: atomicAdd,
atomicAnd: atomicAnd,
atomicFunc: atomicFunc,
atomicLoad: atomicLoad,
atomicMax: atomicMax,
atomicMin: atomicMin,
atomicOr: atomicOr,
atomicStore: atomicStore,
atomicSub: atomicSub,
atomicXor: atomicXor,
attenuationColor: attenuationColor,
attenuationDistance: attenuationDistance,
attribute: attribute,
attributeArray: attributeArray,
backgroundBlurriness: backgroundBlurriness,
backgroundIntensity: backgroundIntensity,
backgroundRotation: backgroundRotation,
batch: batch,
billboarding: billboarding,
bitAnd: bitAnd,
bitNot: bitNot,
bitOr: bitOr,
bitXor: bitXor,
bitangentGeometry: bitangentGeometry,
bitangentLocal: bitangentLocal,
bitangentView: bitangentView,
bitangentWorld: bitangentWorld,
bitcast: bitcast,
blendBurn: blendBurn,
blendColor: blendColor,
blendDodge: blendDodge,
blendOverlay: blendOverlay,
blendScreen: blendScreen,
blur: blur,
bool: bool,
buffer: buffer,
bufferAttribute: bufferAttribute,
bumpMap: bumpMap,
burn: burn,
bvec2: bvec2,
bvec3: bvec3,
bvec4: bvec4,
bypass: bypass,
cache: cache,
call: call,
cameraFar: cameraFar,
cameraIndex: cameraIndex,
cameraNear: cameraNear,
cameraNormalMatrix: cameraNormalMatrix,
cameraPosition: cameraPosition,
cameraProjectionMatrix: cameraProjectionMatrix,
cameraProjectionMatrixInverse: cameraProjectionMatrixInverse,
cameraViewMatrix: cameraViewMatrix,
cameraWorldMatrix: cameraWorldMatrix,
cbrt: cbrt,
cdl: cdl,
ceil: ceil,
checker: checker,
cineonToneMapping: cineonToneMapping,
clamp: clamp,
clearcoat: clearcoat,
clearcoatRoughness: clearcoatRoughness,
code: code,
color: color,
colorSpaceToWorking: colorSpaceToWorking,
colorToDirection: colorToDirection,
compute: compute,
cond: cond,
context: context,
convert: convert,
convertColorSpace: convertColorSpace,
convertToTexture: convertToTexture,
cos: cos,
cross: cross,
cubeTexture: cubeTexture,
dFdx: dFdx,
dFdy: dFdy,
dashSize: dashSize,
defaultBuildStages: defaultBuildStages,
defaultShaderStages: defaultShaderStages,
defined: defined,
degrees: degrees,
deltaTime: deltaTime,
densityFog: densityFog,
densityFogFactor: densityFogFactor,
depth: depth,
depthPass: depthPass,
difference: difference,
diffuseColor: diffuseColor,
directPointLight: directPointLight,
directionToColor: directionToColor,
dispersion: dispersion,
distance: distance,
div: div,
dodge: dodge,
dot: dot,
drawIndex: drawIndex,
dynamicBufferAttribute: dynamicBufferAttribute,
element: element,
emissive: emissive,
equal: equal,
equals: equals,
equirectUV: equirectUV,
exp: exp,
exp2: exp2,
expression: expression,
faceDirection: faceDirection,
faceForward: faceForward,
faceforward: faceforward,
float: float,
floor: floor,
fog: fog,
fract: fract,
frameGroup: frameGroup,
frameId: frameId,
frontFacing: frontFacing,
fwidth: fwidth,
gain: gain,
gapSize: gapSize,
getConstNodeType: getConstNodeType,
getCurrentStack: getCurrentStack,
getDirection: getDirection,
getDistanceAttenuation: getDistanceAttenuation,
getGeometryRoughness: getGeometryRoughness,
getNormalFromDepth: getNormalFromDepth,
getParallaxCorrectNormal: getParallaxCorrectNormal,
getRoughness: getRoughness,
getScreenPosition: getScreenPosition,
getShIrradianceAt: getShIrradianceAt,
getTextureIndex: getTextureIndex,
getViewPosition: getViewPosition,
globalId: globalId,
glsl: glsl,
glslFn: glslFn,
grayscale: grayscale,
greaterThan: greaterThan,
greaterThanEqual: greaterThanEqual,
hash: hash,
highpModelNormalViewMatrix: highpModelNormalViewMatrix,
highpModelViewMatrix: highpModelViewMatrix,
hue: hue,
instance: instance,
instanceIndex: instanceIndex,
instancedArray: instancedArray,
instancedBufferAttribute: instancedBufferAttribute,
instancedDynamicBufferAttribute: instancedDynamicBufferAttribute,
instancedMesh: instancedMesh,
int: int,
inverseSqrt: inverseSqrt,
inversesqrt: inversesqrt,
invocationLocalIndex: invocationLocalIndex,
invocationSubgroupIndex: invocationSubgroupIndex,
ior: ior,
iridescence: iridescence,
iridescenceIOR: iridescenceIOR,
iridescenceThickness: iridescenceThickness,
ivec2: ivec2,
ivec3: ivec3,
ivec4: ivec4,
js: js,
label: label,
length: length,
lengthSq: lengthSq,
lessThan: lessThan,
lessThanEqual: lessThanEqual,
lightPosition: lightPosition,
lightProjectionUV: lightProjectionUV,
lightShadowMatrix: lightShadowMatrix,
lightTargetDirection: lightTargetDirection,
lightTargetPosition: lightTargetPosition,
lightViewPosition: lightViewPosition,
lightingContext: lightingContext,
lights: lights,
linearDepth: linearDepth,
linearToneMapping: linearToneMapping,
localId: localId,
log: log,
log2: log2,
logarithmicDepthToViewZ: logarithmicDepthToViewZ,
loop: loop,
luminance: luminance,
mat2: mat2,
mat3: mat3,
mat4: mat4,
matcapUV: matcapUV,
materialAO: materialAO,
materialAlphaTest: materialAlphaTest,
materialAnisotropy: materialAnisotropy,
materialAnisotropyVector: materialAnisotropyVector,
materialAttenuationColor: materialAttenuationColor,
materialAttenuationDistance: materialAttenuationDistance,
materialClearcoat: materialClearcoat,
materialClearcoatNormal: materialClearcoatNormal,
materialClearcoatRoughness: materialClearcoatRoughness,
materialColor: materialColor,
materialDispersion: materialDispersion,
materialEmissive: materialEmissive,
materialIOR: materialIOR,
materialIridescence: materialIridescence,
materialIridescenceIOR: materialIridescenceIOR,
materialIridescenceThickness: materialIridescenceThickness,
materialLightMap: materialLightMap,
materialLineDashOffset: materialLineDashOffset,
materialLineDashSize: materialLineDashSize,
materialLineGapSize: materialLineGapSize,
materialLineScale: materialLineScale,
materialLineWidth: materialLineWidth,
materialMetalness: materialMetalness,
materialNormal: materialNormal,
materialOpacity: materialOpacity,
materialPointSize: materialPointSize,
materialReference: materialReference,
materialReflectivity: materialReflectivity,
materialRefractionRatio: materialRefractionRatio,
materialRotation: materialRotation,
materialRoughness: materialRoughness,
materialSheen: materialSheen,
materialSheenRoughness: materialSheenRoughness,
materialShininess: materialShininess,
materialSpecular: materialSpecular,
materialSpecularColor: materialSpecularColor,
materialSpecularIntensity: materialSpecularIntensity,
materialSpecularStrength: materialSpecularStrength,
materialThickness: materialThickness,
materialTransmission: materialTransmission,
max: max$1,
maxMipLevel: maxMipLevel,
mediumpModelViewMatrix: mediumpModelViewMatrix,
metalness: metalness,
min: min$1,
mix: mix,
mixElement: mixElement,
mod: mod,
modInt: modInt,
modelDirection: modelDirection,
modelNormalMatrix: modelNormalMatrix,
modelPosition: modelPosition,
modelScale: modelScale,
modelViewMatrix: modelViewMatrix,
modelViewPosition: modelViewPosition,
modelViewProjection: modelViewProjection,
modelWorldMatrix: modelWorldMatrix,
modelWorldMatrixInverse: modelWorldMatrixInverse,
morphReference: morphReference,
mrt: mrt,
mul: mul,
mx_aastep: mx_aastep,
mx_cell_noise_float: mx_cell_noise_float,
mx_contrast: mx_contrast,
mx_fractal_noise_float: mx_fractal_noise_float,
mx_fractal_noise_vec2: mx_fractal_noise_vec2,
mx_fractal_noise_vec3: mx_fractal_noise_vec3,
mx_fractal_noise_vec4: mx_fractal_noise_vec4,
mx_hsvtorgb: mx_hsvtorgb,
mx_noise_float: mx_noise_float,
mx_noise_vec3: mx_noise_vec3,
mx_noise_vec4: mx_noise_vec4,
mx_ramplr: mx_ramplr,
mx_ramptb: mx_ramptb,
mx_rgbtohsv: mx_rgbtohsv,
mx_safepower: mx_safepower,
mx_splitlr: mx_splitlr,
mx_splittb: mx_splittb,
mx_srgb_texture_to_lin_rec709: mx_srgb_texture_to_lin_rec709,
mx_transform_uv: mx_transform_uv,
mx_worley_noise_float: mx_worley_noise_float,
mx_worley_noise_vec2: mx_worley_noise_vec2,
mx_worley_noise_vec3: mx_worley_noise_vec3,
negate: negate,
neutralToneMapping: neutralToneMapping,
nodeArray: nodeArray,
nodeImmutable: nodeImmutable,
nodeObject: nodeObject,
nodeObjects: nodeObjects,
nodeProxy: nodeProxy,
normalFlat: normalFlat,
normalGeometry: normalGeometry,
normalLocal: normalLocal,
normalMap: normalMap,
normalView: normalView,
normalWorld: normalWorld,
normalize: normalize,
not: not,
notEqual: notEqual,
numWorkgroups: numWorkgroups,
objectDirection: objectDirection,
objectGroup: objectGroup,
objectPosition: objectPosition,
objectScale: objectScale,
objectViewPosition: objectViewPosition,
objectWorldMatrix: objectWorldMatrix,
oneMinus: oneMinus,
or: or,
orthographicDepthToViewZ: orthographicDepthToViewZ,
oscSawtooth: oscSawtooth,
oscSine: oscSine,
oscSquare: oscSquare,
oscTriangle: oscTriangle,
output: output,
outputStruct: outputStruct,
overlay: overlay,
overloadingFn: overloadingFn,
parabola: parabola,
parallaxDirection: parallaxDirection,
parallaxUV: parallaxUV,
parameter: parameter,
pass: pass,
passTexture: passTexture,
pcurve: pcurve,
perspectiveDepthToViewZ: perspectiveDepthToViewZ,
pmremTexture: pmremTexture,
pointUV: pointUV,
pointWidth: pointWidth,
positionGeometry: positionGeometry,
positionLocal: positionLocal,
positionPrevious: positionPrevious,
positionView: positionView,
positionViewDirection: positionViewDirection,
positionWorld: positionWorld,
positionWorldDirection: positionWorldDirection,
posterize: posterize,
pow: pow,
pow2: pow2,
pow3: pow3,
pow4: pow4,
property: property,
radians: radians,
rand: rand,
range: range,
rangeFog: rangeFog,
rangeFogFactor: rangeFogFactor,
reciprocal: reciprocal,
reference: reference,
referenceBuffer: referenceBuffer,
reflect: reflect,
reflectVector: reflectVector,
reflectView: reflectView,
reflector: reflector,
refract: refract,
refractVector: refractVector,
refractView: refractView,
reinhardToneMapping: reinhardToneMapping,
remainder: remainder,
remap: remap,
remapClamp: remapClamp,
renderGroup: renderGroup,
renderOutput: renderOutput,
rendererReference: rendererReference,
rotate: rotate,
rotateUV: rotateUV,
roughness: roughness,
round: round,
rtt: rtt,
sRGBTransferEOTF: sRGBTransferEOTF,
sRGBTransferOETF: sRGBTransferOETF,
sampler: sampler,
saturate: saturate,
saturation: saturation,
screen: screen,
screenCoordinate: screenCoordinate,
screenSize: screenSize,
screenUV: screenUV,
scriptable: scriptable,
scriptableValue: scriptableValue,
select: select,
setCurrentStack: setCurrentStack,
shaderStages: shaderStages,
shadow: shadow,
shadowPositionWorld: shadowPositionWorld,
shapeCircle: shapeCircle,
sharedUniformGroup: sharedUniformGroup,
sheen: sheen,
sheenRoughness: sheenRoughness,
shiftLeft: shiftLeft,
shiftRight: shiftRight,
shininess: shininess,
sign: sign,
sin: sin,
sinc: sinc,
skinning: skinning,
skinningReference: skinningReference,
smoothstep: smoothstep,
smoothstepElement: smoothstepElement,
specularColor: specularColor,
specularF90: specularF90,
spherizeUV: spherizeUV,
split: split,
spritesheetUV: spritesheetUV,
sqrt: sqrt,
stack: stack,
step: step,
storage: storage,
storageBarrier: storageBarrier,
storageObject: storageObject,
storageTexture: storageTexture,
string: string,
struct: struct,
sub: sub,
subgroupIndex: subgroupIndex,
subgroupSize: subgroupSize,
tan: tan,
tangentGeometry: tangentGeometry,
tangentLocal: tangentLocal,
tangentView: tangentView,
tangentWorld: tangentWorld,
temp: temp,
texture: texture,
texture3D: texture3D,
textureBarrier: textureBarrier,
textureBicubic: textureBicubic,
textureCubeUV: textureCubeUV,
textureLoad: textureLoad,
textureSize: textureSize,
textureStore: textureStore,
thickness: thickness,
time: time,
timerDelta: timerDelta,
timerGlobal: timerGlobal,
timerLocal: timerLocal,
toOutputColorSpace: toOutputColorSpace,
toWorkingColorSpace: toWorkingColorSpace,
toneMapping: toneMapping,
toneMappingExposure: toneMappingExposure,
toonOutlinePass: toonOutlinePass,
transformDirection: transformDirection,
transformNormal: transformNormal,
transformNormalToView: transformNormalToView,
transformedBentNormalView: transformedBentNormalView,
transformedBitangentView: transformedBitangentView,
transformedBitangentWorld: transformedBitangentWorld,
transformedClearcoatNormalView: transformedClearcoatNormalView,
transformedNormalView: transformedNormalView,
transformedNormalWorld: transformedNormalWorld,
transformedTangentView: transformedTangentView,
transformedTangentWorld: transformedTangentWorld,
transmission: transmission,
transpose: transpose,
triNoise3D: triNoise3D,
triplanarTexture: triplanarTexture,
triplanarTextures: triplanarTextures,
trunc: trunc,
tslFn: tslFn,
uint: uint,
uniform: uniform,
uniformArray: uniformArray,
uniformGroup: uniformGroup,
uniforms: uniforms,
userData: userData,
uv: uv,
uvec2: uvec2,
uvec3: uvec3,
uvec4: uvec4,
varying: varying,
varyingProperty: varyingProperty,
vec2: vec2,
vec3: vec3,
vec4: vec4,
vectorComponents: vectorComponents,
velocity: velocity,
vertexColor: vertexColor,
vertexIndex: vertexIndex,
vertexStage: vertexStage,
vibrance: vibrance,
viewZToLogarithmicDepth: viewZToLogarithmicDepth,
viewZToOrthographicDepth: viewZToOrthographicDepth,
viewZToPerspectiveDepth: viewZToPerspectiveDepth,
viewport: viewport,
viewportBottomLeft: viewportBottomLeft,
viewportCoordinate: viewportCoordinate,
viewportDepthTexture: viewportDepthTexture,
viewportLinearDepth: viewportLinearDepth,
viewportMipTexture: viewportMipTexture,
viewportResolution: viewportResolution,
viewportSafeUV: viewportSafeUV,
viewportSharedTexture: viewportSharedTexture,
viewportSize: viewportSize,
viewportTexture: viewportTexture,
viewportTopLeft: viewportTopLeft,
viewportUV: viewportUV,
wgsl: wgsl,
wgslFn: wgslFn,
workgroupArray: workgroupArray,
workgroupBarrier: workgroupBarrier,
workgroupId: workgroupId,
workingToColorSpace: workingToColorSpace,
xor: xor
});
// Module-scoped scratch color reused by Background.update() each frame.
const _clearColor$1 = /*@__PURE__*/ new Color4();
/**
* This renderer module manages the background.
*
* @private
* @augments DataMap
*/
class Background extends DataMap {
/**
* Constructs a new background management component.
*
* @param {Renderer} renderer - The renderer.
* @param {Nodes} nodes - Renderer component for managing nodes related logic.
*/
constructor( renderer, nodes ) {
super();
/**
* The renderer.
*
* @type {Renderer}
*/
this.renderer = renderer;
/**
* Renderer component for managing nodes related logic.
*
* @type {Nodes}
*/
this.nodes = nodes;
}
/**
* Updates the background for the given scene. Depending on how `Scene.background`
* or `Scene.backgroundNode` are configured, this method might configure a simple clear
* or add a mesh to the render list for rendering the background as a textured plane
* or skybox.
*
* @param {Scene} scene - The scene.
* @param {RenderList} renderList - The current render list.
* @param {RenderContext} renderContext - The current render context.
*/
update( scene, renderList, renderContext ) {
const renderer = this.renderer;
// A node-based background takes precedence over scene.background.
const background = this.nodes.getBackgroundNode( scene ) || scene.background;
let forceClear = false;
if ( background === null ) {
// no background settings, use clear color configuration from the renderer
renderer._clearColor.getRGB( _clearColor$1, LinearSRGBColorSpace );
_clearColor$1.a = renderer._clearColor.a;
} else if ( background.isColor === true ) {
// background is an opaque color
background.getRGB( _clearColor$1, LinearSRGBColorSpace );
_clearColor$1.a = 1;
forceClear = true;
} else if ( background.isNode === true ) {
// background is a node: render it with a dedicated mesh, cached per scene
const sceneData = this.get( scene );
const backgroundNode = background;
_clearColor$1.copy( renderer._clearColor );
let backgroundMesh = sceneData.backgroundMesh;
if ( backgroundMesh === undefined ) {
// lazily build the background mesh: a sphere rendered from the inside
const backgroundMeshNode = context( vec4( backgroundNode ).mul( backgroundIntensity ), {
// @TODO: Add Texture2D support using node context
getUV: () => backgroundRotation.mul( normalWorld ),
getTextureLevel: () => backgroundBlurriness
} );
// force z to w so the mesh projects onto the far plane (depth 1)
let viewProj = modelViewProjection;
viewProj = viewProj.setZ( viewProj.w );
const nodeMaterial = new NodeMaterial();
nodeMaterial.name = 'Background.material';
nodeMaterial.side = BackSide;
nodeMaterial.depthTest = false;
nodeMaterial.depthWrite = false;
nodeMaterial.fog = false;
nodeMaterial.lights = false;
nodeMaterial.vertexNode = viewProj;
nodeMaterial.colorNode = backgroundMeshNode;
sceneData.backgroundMeshNode = backgroundMeshNode;
sceneData.backgroundMesh = backgroundMesh = new Mesh( new SphereGeometry( 1, 32, 32 ), nodeMaterial );
backgroundMesh.frustumCulled = false;
backgroundMesh.name = 'Background.mesh';
// keep the background mesh centered on the camera
backgroundMesh.onBeforeRender = function ( renderer, scene, camera ) {
this.matrixWorld.copyPosition( camera.matrixWorld );
};
}
// rebuild the material if the background node graph changed
const backgroundCacheKey = backgroundNode.getCacheKey();
if ( sceneData.backgroundCacheKey !== backgroundCacheKey ) {
sceneData.backgroundMeshNode.node = vec4( backgroundNode ).mul( backgroundIntensity );
sceneData.backgroundMeshNode.needsUpdate = true;
backgroundMesh.material.needsUpdate = true;
sceneData.backgroundCacheKey = backgroundCacheKey;
}
// render the background before all other objects in the list
renderList.unshift( backgroundMesh, backgroundMesh.geometry, backgroundMesh.material, 0, 0, null, null );
} else {
console.error( 'THREE.Renderer: Unsupported background configuration.', background );
}
// XR environment blending overrides the computed clear color
const environmentBlendMode = renderer.xr.getEnvironmentBlendMode();
if ( environmentBlendMode === 'additive' ) {
_clearColor$1.set( 0, 0, 0, 1 );
} else if ( environmentBlendMode === 'alpha-blend' ) {
_clearColor$1.set( 0, 0, 0, 0 );
}
// configure the clear behavior on the render context
if ( renderer.autoClear === true || forceClear === true ) {
const clearColorValue = renderContext.clearColorValue;
clearColorValue.r = _clearColor$1.r;
clearColorValue.g = _clearColor$1.g;
clearColorValue.b = _clearColor$1.b;
clearColorValue.a = _clearColor$1.a;
// premultiply alpha
if ( renderer.backend.isWebGLBackend === true || renderer.alpha === true ) {
clearColorValue.r *= clearColorValue.a;
clearColorValue.g *= clearColorValue.a;
clearColorValue.b *= clearColorValue.a;
}
// depth/stencil clears follow the renderer's auto-clear settings
renderContext.depthClearValue = renderer._clearDepth;
renderContext.stencilClearValue = renderer._clearStencil;
renderContext.clearColor = renderer.autoClearColor === true;
renderContext.clearDepth = renderer.autoClearDepth === true;
renderContext.clearStencil = renderer.autoClearStencil === true;
} else {
renderContext.clearColor = false;
renderContext.clearDepth = false;
renderContext.clearStencil = false;
}
}
}
// Module-scoped counter used to assign each BindGroup a unique, monotonically increasing id.
let _id$6 = 0;
/**
* A bind group represents a collection of bindings and thus a collection
* or resources. Bind groups are assigned to pipelines to provide them
* with the required resources (like uniform buffers or textures).
*
* @private
*/
class BindGroup {

	/**
	 * Constructs a new bind group.
	 *
	 * @param {String} name - The bind group's name.
	 * @param {Array<Binding>} bindings - An array of bindings.
	 * @param {Number} index - The group index.
	 * @param {Array<Binding>} bindingsReference - An array of reference bindings.
	 */
	constructor( name = '', bindings = [], index = 0, bindingsReference = [] ) {

		// Copy all constructor arguments onto the instance in one pass.
		Object.assign( this, { name, bindings, index, bindingsReference } );

		// Unique group id, drawn from a module-level counter.
		this.id = _id$6 ++;

	}

}
/**
* This module represents the state of a node builder after it was
* used to build the nodes for a render object. The state holds the
* results of the build for further processing in the renderer.
*
* Render objects with identical cache keys share the same node builder state.
*
* @private
*/
class NodeBuilderState {

	/**
	 * Constructs a new node builder state.
	 *
	 * @param {String?} vertexShader - The native vertex shader code.
	 * @param {String?} fragmentShader - The native fragment shader code.
	 * @param {String?} computeShader - The native compute shader code.
	 * @param {Array<NodeAttribute>} nodeAttributes - An array of node attributes.
	 * @param {Array<BindGroup>} bindings - An array of bind groups.
	 * @param {Array<Node>} updateNodes - Nodes that implement `update()`.
	 * @param {Array<Node>} updateBeforeNodes - Nodes that implement `updateBefore()`.
	 * @param {Array<Node>} updateAfterNodes - Nodes that implement `updateAfter()`.
	 * @param {NodeMaterialObserver} observer - A node material observer.
	 * @param {Array<Object>} transforms - Transform attribute objects. Only relevant when using compute shaders with WebGL 2.
	 */
	constructor( vertexShader, fragmentShader, computeShader, nodeAttributes, bindings, updateNodes, updateBeforeNodes, updateAfterNodes, observer, transforms = [] ) {

		// Assign all build results onto the state in one pass. `usedTimes`
		// tracks how often this state is shared by render objects.
		Object.assign( this, {
			vertexShader,
			fragmentShader,
			computeShader,
			transforms,
			nodeAttributes,
			bindings,
			updateNodes,
			updateBeforeNodes,
			updateAfterNodes,
			observer,
			usedTimes: 0
		} );

	}

	/**
	 * Creates an array of bind groups based on the existing bind groups
	 * of this state. Shared groups are not cloned.
	 *
	 * @return {Array<BindGroup>} An array of bind groups.
	 */
	createBindings() {

		return this.bindings.map( ( instanceGroup ) => {

			// All bindings in a group have the same groupNode, so inspecting
			// the first binding is sufficient.
			if ( instanceGroup.bindings[ 0 ].groupNode.shared === true ) {

				// Shared groups are reused as-is.
				return instanceGroup;

			}

			const clonedBindings = instanceGroup.bindings.map( ( binding ) => binding.clone() );

			return new BindGroup( instanceGroup.name, clonedBindings, instanceGroup.index, instanceGroup );

		} );

	}

}
/**
* {@link NodeBuilder} is going to create instances of this class during the build process
* of nodes. They represent the final shader attributes that are going to be generated
* by the builder. Arrays of node attributes is maintained in {@link NodeBuilder#attributes}
* and {@link NodeBuilder#bufferAttributes} for this purpose.
*/
class NodeAttribute {

	/**
	 * Constructs a new node attribute.
	 *
	 * @param {String} name - The name of the attribute.
	 * @param {String} type - The type of the attribute.
	 * @param {Node?} [node=null] - An optional reference to the node.
	 */
	constructor( name, type, node = null ) {

		// Type-testing flag.
		this.isNodeAttribute = true;

		this.name = name; // the attribute's name
		this.type = type; // the attribute's type
		this.node = node; // optional node reference (null when absent)

	}

}
/**
* {@link NodeBuilder} is going to create instances of this class during the build process
* of nodes. They represent the final shader uniforms that are going to be generated
* by the builder. A dictionary of node uniforms is maintained in {@link NodeBuilder#uniforms}
* for this purpose.
*/
class NodeUniform {

	/**
	 * Constructs a new node uniform.
	 *
	 * @param {String} name - The name of the uniform.
	 * @param {String} type - The type of the uniform.
	 * @param {UniformNode} node - A reference to the node.
	 */
	constructor( name, type, node ) {

		// Type-testing flag.
		this.isNodeUniform = true;

		this.name = name;
		this.type = type;

		// Store the node's resolved (self) reference.
		this.node = node.getSelf();

	}

	// The value of the uniform node; reads and writes are delegated
	// to the backing node.

	get value() {

		return this.node.value;

	}

	set value( val ) {

		this.node.value = val;

	}

	// The id of the uniform node.

	get id() {

		return this.node.id;

	}

	// The uniform node's group.

	get groupNode() {

		return this.node.groupNode;

	}

}
/**
* {@link NodeBuilder} is going to create instances of this class during the build process
* of nodes. They represent the final shader variables that are going to be generated
* by the builder. A dictionary of node variables is maintained in {@link NodeBuilder#vars} for
* this purpose.
*/
class NodeVar {

	/**
	 * Constructs a new node variable.
	 *
	 * @param {String} name - The name of the variable.
	 * @param {String} type - The type of the variable.
	 * @param {Boolean} [readOnly=false] - The read-only flag.
	 * @param {Number?} [count=null] - The size.
	 */
	constructor( name, type, readOnly = false, count = null ) {

		// Type-testing flag.
		this.isNodeVar = true;

		// Copy name, type, read-only flag and size onto the instance.
		Object.assign( this, { name, type, readOnly, count } );

	}

}
/**
* {@link NodeBuilder} is going to create instances of this class during the build process
* of nodes. They represent the final shader varyings that are going to be generated
* by the builder. An array of node varyings is maintained in {@link NodeBuilder#varyings} for
* this purpose.
*
* @augments NodeVar
*/
class NodeVarying extends NodeVar {

	/**
	 * Constructs a new node varying.
	 *
	 * @param {String} name - The name of the varying.
	 * @param {String} type - The type of the varying.
	 */
	constructor( name, type ) {

		super( name, type );

		// Whether this varying requires interpolation. Used to check if the
		// varying can be optimized for a variable.
		this.needsInterpolation = false;

		// Type-testing flag.
		this.isNodeVarying = true;

	}

}
/**
* {@link NodeBuilder} is going to create instances of this class during the build process
* of nodes. They represent user-defined, native shader code portions that are going to be
* injected by the builder. A dictionary of node codes is maintained in {@link NodeBuilder#codes}
* for this purpose.
*/
class NodeCode {

	/**
	 * Constructs a new code node.
	 *
	 * @param {String} name - The name of the code.
	 * @param {String} type - The node type.
	 * @param {String} [code=''] - The native shader code.
	 */
	constructor( name, type, code = '' ) {

		this.name = name; // the code's name
		this.type = type; // the node type
		this.code = code; // the native shader code

		// Type-testing flag, defined non-writable and non-enumerable
		// (data-descriptor defaults of Object.defineProperty).
		Object.defineProperty( this, 'isNodeCode', { value: true } );

	}

}
// Module-scoped counter used to assign each NodeCache a unique, monotonically increasing id.
let _id$5 = 0;
/**
* This utility class is used in {@link NodeBuilder} as an internal
* cache data structure for node data.
*/
class NodeCache {

	/**
	 * Constructs a new node cache.
	 *
	 * @param {NodeCache?} [parent=null] - A reference to a parent cache.
	 */
	constructor( parent = null ) {

		// Unique cache id, drawn from a module-level counter.
		this.id = _id$5 ++;

		// Weak map holding per-node data; entries are released when nodes
		// are garbage collected.
		this.nodesData = new WeakMap();

		// Optional parent cache, consulted on lookup misses.
		this.parent = parent;

	}

	/**
	 * Returns the data for the given node, falling back to the parent
	 * cache when this cache has no entry.
	 *
	 * @param {Node} node - The node.
	 * @return {Object?} The data for the node, or undefined.
	 */
	getData( node ) {

		const data = this.nodesData.get( node );

		if ( data !== undefined ) return data;

		return this.parent !== null ? this.parent.getData( node ) : undefined;

	}

	/**
	 * Sets the data for a given node in this cache (never the parent).
	 *
	 * @param {Node} node - The node.
	 * @param {Object} data - The data that should be cached.
	 */
	setData( node, data ) {

		this.nodesData.set( node, data );

	}

}
/**
 * Describes a named struct type with a list of members.
 */
class StructType {

	/**
	 * @param {String} name - The struct's name.
	 * @param {Array} members - The struct's members.
	 */
	constructor( name, members ) {

		this.name = name;
		this.members = members;

		// Output flag, false by default; toggled elsewhere —
		// NOTE(review): exact semantics not visible in this chunk.
		this.output = false;

	}

}
/**
* Abstract base class for uniforms.
*
* @abstract
* @private
*/
class Uniform {

	/**
	 * Constructs a new uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Any} value - The uniform's value.
	 */
	constructor( name, value ) {

		this.name = name;
		this.value = value;

		// STD140 layout metadata; derived uniforms overwrite these with
		// data-type specific values.
		this.boundary = 0;
		this.itemSize = 0;

		// Start position in the uniform buffer, set by UniformsGroup.
		this.offset = 0;

	}

	/**
	 * Sets the uniform's value.
	 *
	 * @param {Any} value - The value to set.
	 */
	setValue( value ) {

		this.value = value;

	}

	/**
	 * Returns the uniform's value.
	 *
	 * @return {Any} The value.
	 */
	getValue() {

		return this.value;

	}

}
/**
* Represents a Number uniform.
*
* @private
* @augments Uniform
*/
class NumberUniform extends Uniform {

	/**
	 * Constructs a new Number uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Number} [value=0] - The uniform's value.
	 */
	constructor( name, value = 0 ) {

		super( name, value );

		// Type-testing flag.
		this.isNumberUniform = true;

		// STD140 layout: a single float, 4-byte aligned.
		this.boundary = 4;
		this.itemSize = 1;

	}

}
/**
* Represents a Vector2 uniform.
*
* @private
* @augments Uniform
*/
class Vector2Uniform extends Uniform {

	/**
	 * Constructs a new Vector2 uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Vector2} [value=new Vector2()] - The uniform's value.
	 */
	constructor( name, value = new Vector2() ) {

		super( name, value );

		// Type-testing flag.
		this.isVector2Uniform = true;

		// STD140 layout: two floats, 8-byte aligned.
		this.boundary = 8;
		this.itemSize = 2;

	}

}
/**
* Represents a Vector3 uniform.
*
* @private
* @augments Uniform
*/
class Vector3Uniform extends Uniform {

	/**
	 * Constructs a new Vector3 uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Vector3} [value=new Vector3()] - The uniform's value.
	 */
	constructor( name, value = new Vector3() ) {

		super( name, value );

		// Type-testing flag.
		this.isVector3Uniform = true;

		// STD140 layout: three floats, padded to a 16-byte boundary.
		this.boundary = 16;
		this.itemSize = 3;

	}

}
/**
* Represents a Vector4 uniform.
*
* @private
* @augments Uniform
*/
class Vector4Uniform extends Uniform {

	/**
	 * Constructs a new Vector4 uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Vector4} [value=new Vector4()] - The uniform's value.
	 */
	constructor( name, value = new Vector4() ) {

		super( name, value );

		// Type-testing flag.
		this.isVector4Uniform = true;

		// STD140 layout: four floats, 16-byte aligned.
		this.boundary = 16;
		this.itemSize = 4;

	}

}
/**
* Represents a Color uniform.
*
* @private
* @augments Uniform
*/
class ColorUniform extends Uniform {

	/**
	 * Constructs a new Color uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Color} [value=new Color()] - The uniform's value.
	 */
	constructor( name, value = new Color() ) {

		super( name, value );

		// Type-testing flag.
		this.isColorUniform = true;

		// STD140 layout: three floats (rgb), padded to a 16-byte boundary.
		this.boundary = 16;
		this.itemSize = 3;

	}

}
/**
* Represents a Matrix2 uniform.
*
* @private
* @augments Uniform
*/
class Matrix2Uniform extends Uniform {

	/**
	 * Constructs a new Matrix2 uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Matrix2} [value=new Matrix2()] - The uniform's value.
	 */
	constructor( name, value = new Matrix2() ) {

		super( name, value );

		// Type-testing flag.
		this.isMatrix2Uniform = true;

		// STD140 layout: 2x2 matrix stored as four floats, 16-byte aligned.
		this.boundary = 16;
		this.itemSize = 4;

	}

}
/**
* Represents a Matrix3 uniform.
*
* @private
* @augments Uniform
*/
class Matrix3Uniform extends Uniform {

	/**
	 * Constructs a new Matrix3 uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Matrix3} [value=new Matrix3()] - The uniform's value.
	 */
	constructor( name, value = new Matrix3() ) {

		super( name, value );

		// Type-testing flag.
		this.isMatrix3Uniform = true;

		// STD140 layout: 3x3 matrix stored as three padded columns (12 floats).
		this.boundary = 48;
		this.itemSize = 12;

	}

}
/**
* Represents a Matrix4 uniform.
*
* @private
* @augments Uniform
*/
class Matrix4Uniform extends Uniform {

	/**
	 * Constructs a new Matrix4 uniform.
	 *
	 * @param {String} name - The uniform's name.
	 * @param {Matrix4} [value=new Matrix4()] - The uniform's value.
	 */
	constructor( name, value = new Matrix4() ) {

		super( name, value );

		// Type-testing flag.
		this.isMatrix4Uniform = true;

		// STD140 layout: 4x4 matrix stored as sixteen floats, 64-byte aligned.
		this.boundary = 64;
		this.itemSize = 16;

	}

}
/**
* A special form of Number uniform binding type.
* It's value is managed by a node object.
*
* @private
* @augments NumberUniform
*/
class NumberNodeUniform extends NumberUniform {

	/**
	 * Constructs a new node-based Number uniform whose value is managed
	 * by a node object.
	 *
	 * @param {NodeUniform} nodeUniform - The node uniform.
	 */
	constructor( nodeUniform ) {

		super( nodeUniform.name, nodeUniform.value );

		// Backing node uniform; value and type are always read from it.
		this.nodeUniform = nodeUniform;

	}

	/**
	 * Returns the current value of the backing node uniform.
	 *
	 * @return {Number} The value.
	 */
	getValue() {

		return this.nodeUniform.value;

	}

	/**
	 * Returns the data type of the backing node uniform.
	 *
	 * @return {String} The data type.
	 */
	getType() {

		return this.nodeUniform.type;

	}

}
/**
* A special form of Vector2 uniform binding type.
* It's value is managed by a node object.
*
* @private
* @augments Vector2Uniform
*/
class Vector2NodeUniform extends Vector2Uniform {

	/**
	 * Constructs a new node-based Vector2 uniform whose value is managed
	 * by a node object.
	 *
	 * @param {NodeUniform} nodeUniform - The node uniform.
	 */
	constructor( nodeUniform ) {

		super( nodeUniform.name, nodeUniform.value );

		// Backing node uniform; value and type are always read from it.
		this.nodeUniform = nodeUniform;

	}

	/**
	 * Returns the current value of the backing node uniform.
	 *
	 * @return {Vector2} The value.
	 */
	getValue() {

		return this.nodeUniform.value;

	}

	/**
	 * Returns the data type of the backing node uniform.
	 *
	 * @return {String} The data type.
	 */
	getType() {

		return this.nodeUniform.type;

	}

}
/**
* A special form of Vector3 uniform binding type.
* It's value is managed by a node object.
*
* @private
* @augments Vector3Uniform
*/
class Vector3NodeUniform extends Vector3Uniform {

	/**
	 * Constructs a new node-based Vector3 uniform whose value is managed
	 * by a node object.
	 *
	 * @param {NodeUniform} nodeUniform - The node uniform.
	 */
	constructor( nodeUniform ) {

		super( nodeUniform.name, nodeUniform.value );

		// Backing node uniform; value and type are always read from it.
		this.nodeUniform = nodeUniform;

	}

	/**
	 * Returns the current value of the backing node uniform.
	 *
	 * @return {Vector3} The value.
	 */
	getValue() {

		return this.nodeUniform.value;

	}

	/**
	 * Returns the data type of the backing node uniform.
	 *
	 * @return {String} The data type.
	 */
	getType() {

		return this.nodeUniform.type;

	}

}
/**
* A special form of Vector4 uniform binding type.
* It's value is managed by a node object.
*
* @private
* @augments Vector4Uniform
*/
class Vector4NodeUniform extends Vector4Uniform {

	/**
	 * Constructs a new node-based Vector4 uniform whose value is managed
	 * by a node object.
	 *
	 * @param {NodeUniform} nodeUniform - The node uniform.
	 */
	constructor( nodeUniform ) {

		super( nodeUniform.name, nodeUniform.value );

		// Backing node uniform; value and type are always read from it.
		this.nodeUniform = nodeUniform;

	}

	/**
	 * Returns the current value of the backing node uniform.
	 *
	 * @return {Vector4} The value.
	 */
	getValue() {

		return this.nodeUniform.value;

	}

	/**
	 * Returns the data type of the backing node uniform.
	 *
	 * @return {String} The data type.
	 */
	getType() {

		return this.nodeUniform.type;

	}

}
/**
* A special form of Color uniform binding type.
* It's value is managed by a node object.
*
* @private
* @augments ColorUniform
*/
class ColorNodeUniform extends ColorUniform {

	/**
	 * Constructs a new node-based Color uniform whose value is managed
	 * by a node object.
	 *
	 * @param {NodeUniform} nodeUniform - The node uniform.
	 */
	constructor( nodeUniform ) {

		super( nodeUniform.name, nodeUniform.value );

		// Backing node uniform; value and type are always read from it.
		this.nodeUniform = nodeUniform;

	}

	/**
	 * Returns the current value of the backing node uniform.
	 *
	 * @return {Color} The value.
	 */
	getValue() {

		return this.nodeUniform.value;

	}

	/**
	 * Returns the data type of the backing node uniform.
	 *
	 * @return {String} The data type.
	 */
	getType() {

		return this.nodeUniform.type;

	}

}
/**
* A special form of Matrix2 uniform binding type.
* It's value is managed by a node object.
*
* @private
* @augments Matrix2Uniform
*/
class Matrix2NodeUniform extends Matrix2Uniform {

	/**
	 * Constructs a new node-based Matrix2 uniform whose value is managed
	 * by a node object.
	 *
	 * @param {NodeUniform} nodeUniform - The node uniform.
	 */
	constructor( nodeUniform ) {

		super( nodeUniform.name, nodeUniform.value );

		// Backing node uniform; value and type are always read from it.
		this.nodeUniform = nodeUniform;

	}

	/**
	 * Returns the current value of the backing node uniform.
	 *
	 * @return {Matrix2} The value.
	 */
	getValue() {

		return this.nodeUniform.value;

	}

	/**
	 * Returns the data type of the backing node uniform.
	 *
	 * @return {String} The data type.
	 */
	getType() {

		return this.nodeUniform.type;

	}

}
/**
* A special form of Matrix3 uniform binding type.
* It's value is managed by a node object.
*
* @private
* @augments Matrix3Uniform
*/
class Matrix3NodeUniform extends Matrix3Uniform {

	/**
	 * Constructs a new node-based Matrix3 uniform whose value is managed
	 * by a node object.
	 *
	 * @param {NodeUniform} nodeUniform - The node uniform.
	 */
	constructor( nodeUniform ) {

		super( nodeUniform.name, nodeUniform.value );

		// Backing node uniform; value and type are always read from it.
		this.nodeUniform = nodeUniform;

	}

	/**
	 * Returns the current value of the backing node uniform.
	 *
	 * @return {Matrix3} The value.
	 */
	getValue() {

		return this.nodeUniform.value;

	}

	/**
	 * Returns the data type of the backing node uniform.
	 *
	 * @return {String} The data type.
	 */
	getType() {

		return this.nodeUniform.type;

	}

}
/**
* A special form of Matrix4 uniform binding type.
* It's value is managed by a node object.
*
* @private
* @augments Matrix4Uniform
*/
class Matrix4NodeUniform extends Matrix4Uniform {

	/**
	 * Constructs a new node-based Matrix4 uniform whose value is managed
	 * by a node object.
	 *
	 * @param {NodeUniform} nodeUniform - The node uniform.
	 */
	constructor( nodeUniform ) {

		super( nodeUniform.name, nodeUniform.value );

		// Backing node uniform; value and type are always read from it.
		this.nodeUniform = nodeUniform;

	}

	/**
	 * Returns the current value of the backing node uniform.
	 *
	 * @return {Matrix4} The value.
	 */
	getValue() {

		return this.nodeUniform.value;

	}

	/**
	 * Returns the data type of the backing node uniform.
	 *
	 * @return {String} The data type.
	 */
	getType() {

		return this.nodeUniform.type;

	}

}
const LOD_MIN = 4;
// The standard deviations (radians) associated with the extra mips. These are
// chosen to approximate a Trowbridge-Reitz distribution function times the
// geometric shadowing function. These sigma values squared must match the
// variance #defines in cube_uv_reflection_fragment.glsl.js.
const EXTRA_LOD_SIGMA = [ 0.125, 0.215, 0.35, 0.446, 0.526, 0.582 ];
// The maximum length of the blur for loop. Smaller sigmas will use fewer
// samples and exit early, but not recompile the shader.
const MAX_SAMPLES = 20;
const _flatCamera = /*@__PURE__*/ new OrthographicCamera( - 1, 1, 1, - 1, 0, 1 );
const _cubeCamera = /*@__PURE__*/ new PerspectiveCamera( 90, 1 );
const _clearColor = /*@__PURE__*/ new Color();
let _oldTarget = null;
let _oldActiveCubeFace = 0;
let _oldActiveMipmapLevel = 0;
// Golden Ratio
const PHI = ( 1 + Math.sqrt( 5 ) ) / 2;
const INV_PHI = 1 / PHI;
// Vertices of a dodecahedron (except the opposites, which represent the
// same axis), used as axis directions evenly spread on a sphere.
const _axisDirections = [
/*@__PURE__*/ new Vector3( - PHI, INV_PHI, 0 ),
/*@__PURE__*/ new Vector3( PHI, INV_PHI, 0 ),
/*@__PURE__*/ new Vector3( - INV_PHI, 0, PHI ),
/*@__PURE__*/ new Vector3( INV_PHI, 0, PHI ),
/*@__PURE__*/ new Vector3( 0, PHI, - INV_PHI ),
/*@__PURE__*/ new Vector3( 0, PHI, INV_PHI ),
/*@__PURE__*/ new Vector3( - 1, 1, - 1 ),
/*@__PURE__*/ new Vector3( 1, 1, - 1 ),
/*@__PURE__*/ new Vector3( - 1, 1, 1 ),
/*@__PURE__*/ new Vector3( 1, 1, 1 )
];
// maps blur materials to their uniforms dictionary
const _uniformsMap = new WeakMap();
// WebGPU Face indices
const _faceLib = [
3, 1, 5,
0, 4, 2
];
const _direction = /*@__PURE__*/ getDirection( uv(), attribute( 'faceIndex' ) ).normalize();
const _outputDirection = /*@__PURE__*/ vec3( _direction.x, _direction.y, _direction.z );
/**
* This class generates a Prefiltered, Mipmapped Radiance Environment Map
* (PMREM) from a cubeMap environment texture. This allows different levels of
* blur to be quickly accessed based on material roughness. It is packed into a
* special CubeUV format that allows us to perform custom interpolation so that
* we can support nonlinear formats such as RGBE. Unlike a traditional mipmap
* chain, it only goes down to the LOD_MIN level (above), and then creates extra
* even more filtered 'mips' at the same LOD_MIN resolution, associated with
* higher roughness levels. In this way we maintain resolution to smoothly
* interpolate diffuse lighting while limiting sampling computation.
*
* Paper: Fast, Accurate Image-Based Lighting
* https://drive.google.com/file/d/15y8r_UpKlU9SvV4ILb0C3qCPecS8pvLz/view
*/
class PMREMGenerator {
constructor( renderer ) {
// The renderer used for all capture, blur, and PMREM passes.
this._renderer = renderer;
// Ping-pong target used while blurring; allocated lazily.
this._pingPongRenderTarget = null;
// Derived from the input size in _setSize(): max LOD and cube face size.
this._lodMax = 0;
this._cubeSize = 0;
// Per-LOD geometry, sizes, blur sigmas, and meshes; filled during allocation.
this._lodPlanes = [];
this._sizeLods = [];
this._sigmas = [];
this._lodMeshes = [];
// Lazily created materials (see compile*Shader and the _get*Material helpers).
this._blurMaterial = null;
this._cubemapMaterial = null;
this._equirectMaterial = null;
// Lazily created box used when rendering a scene background; see dispose().
this._backgroundBox = null;
}
// Whether the renderer's backend has finished initialization; gates the
// synchronous from*() entry points.
get _hasInitialized() {
return this._renderer.hasInitialized();
}
/**
* Generates a PMREM from a supplied Scene, which can be faster than using an
* image if networking bandwidth is low. Optional sigma specifies a blur radius
* in radians to be applied to the scene before PMREM generation. Optional near
* and far planes ensure the scene is rendered in its entirety (the cubeCamera
* is placed at the origin).
*
* @param {Scene} scene - The scene to be captured.
* @param {Number} [sigma=0] - The blur radius in radians.
* @param {Number} [near=0.1] - The near plane distance.
* @param {Number} [far=100] - The far plane distance.
* @param {RenderTarget?} [renderTarget=null] - The render target to use.
* @return {RenderTarget} The resulting PMREM.
* @see fromSceneAsync
*/
fromScene( scene, sigma = 0, near = 0.1, far = 100, renderTarget = null ) {
this._setSize( 256 );
if ( this._hasInitialized === false ) {
console.warn( 'THREE.PMREMGenerator: .fromScene() called before the backend is initialized. Try using .fromSceneAsync() instead.' );
const cubeUVRenderTarget = renderTarget || this._allocateTargets();
this.fromSceneAsync( scene, sigma, near, far, cubeUVRenderTarget );
return cubeUVRenderTarget;
}
_oldTarget = this._renderer.getRenderTarget();
_oldActiveCubeFace = this._renderer.getActiveCubeFace();
_oldActiveMipmapLevel = this._renderer.getActiveMipmapLevel();
const cubeUVRenderTarget = renderTarget || this._allocateTargets();
cubeUVRenderTarget.depthBuffer = true;
this._sceneToCubeUV( scene, near, far, cubeUVRenderTarget );
if ( sigma > 0 ) {
this._blur( cubeUVRenderTarget, 0, 0, sigma );
}
this._applyPMREM( cubeUVRenderTarget );
this._cleanup( cubeUVRenderTarget );
return cubeUVRenderTarget;
}
/**
* Generates a PMREM from a supplied Scene, which can be faster than using an
* image if networking bandwidth is low. Optional sigma specifies a blur radius
* in radians to be applied to the scene before PMREM generation. Optional near
* and far planes ensure the scene is rendered in its entirety (the cubeCamera
* is placed at the origin).
*
* @param {Scene} scene - The scene to be captured.
* @param {Number} [sigma=0] - The blur radius in radians.
* @param {Number} [near=0.1] - The near plane distance.
* @param {Number} [far=100] - The far plane distance.
* @param {RenderTarget?} [renderTarget=null] - The render target to use.
* @return {Promise<RenderTarget>} The resulting PMREM.
* @see fromScene
*/
async fromSceneAsync( scene, sigma = 0, near = 0.1, far = 100, renderTarget = null ) {
// Ensure the backend is initialized, then delegate to the sync path.
if ( this._hasInitialized === false ) await this._renderer.init();
return this.fromScene( scene, sigma, near, far, renderTarget );
}
/**
* Generates a PMREM from an equirectangular texture, which can be either LDR
* or HDR. The ideal input image size is 1k (1024 x 512),
* as this matches best with the 256 x 256 cubemap output.
*
* @param {Texture} equirectangular - The equirectangular texture to be converted.
* @param {RenderTarget?} [renderTarget=null] - The render target to use.
* @return {RenderTarget} The resulting PMREM.
* @see fromEquirectangularAsync
*/
fromEquirectangular( equirectangular, renderTarget = null ) {
if ( this._hasInitialized === false ) {
console.warn( 'THREE.PMREMGenerator: .fromEquirectangular() called before the backend is initialized. Try using .fromEquirectangularAsync() instead.' );
this._setSizeFromTexture( equirectangular );
const cubeUVRenderTarget = renderTarget || this._allocateTargets();
this.fromEquirectangularAsync( equirectangular, cubeUVRenderTarget );
return cubeUVRenderTarget;
}
return this._fromTexture( equirectangular, renderTarget );
}
/**
* Generates a PMREM from an equirectangular texture, which can be either LDR
* or HDR. The ideal input image size is 1k (1024 x 512),
* as this matches best with the 256 x 256 cubemap output.
*
* @param {Texture} equirectangular - The equirectangular texture to be converted.
* @param {RenderTarget?} [renderTarget=null] - The render target to use.
* @return {Promise<RenderTarget>} The resulting PMREM.
* @see fromEquirectangular
*/
async fromEquirectangularAsync( equirectangular, renderTarget = null ) {
// Ensure the backend is initialized, then reuse the shared texture path.
if ( this._hasInitialized === false ) await this._renderer.init();
return this._fromTexture( equirectangular, renderTarget );
}
/**
* Generates a PMREM from a cubemap texture, which can be either LDR
* or HDR. The ideal input cube size is 256 x 256,
* as this matches best with the 256 x 256 cubemap output.
*
* @param {Texture} cubemap - The cubemap texture to be converted.
* @param {RenderTarget?} [renderTarget=null] - The render target to use.
* @return {RenderTarget} The resulting PMREM.
* @see fromCubemapAsync
*/
fromCubemap( cubemap, renderTarget = null ) {
if ( this._hasInitialized === false ) {
console.warn( 'THREE.PMREMGenerator: .fromCubemap() called before the backend is initialized. Try using .fromCubemapAsync() instead.' );
this._setSizeFromTexture( cubemap );
const cubeUVRenderTarget = renderTarget || this._allocateTargets();
this.fromCubemapAsync( cubemap, renderTarget );
return cubeUVRenderTarget;
}
return this._fromTexture( cubemap, renderTarget );
}
/**
* Generates a PMREM from a cubemap texture, which can be either LDR
* or HDR. The ideal input cube size is 256 x 256,
* as this matches best with the 256 x 256 cubemap output.
*
* @param {Texture} cubemap - The cubemap texture to be converted.
* @param {RenderTarget?} [renderTarget=null] - The render target to use.
* @return {Promise<RenderTarget>} The resulting PMREM.
* @see fromCubemap
*/
async fromCubemapAsync( cubemap, renderTarget = null ) {
// Ensure the backend is initialized, then reuse the shared texture path.
if ( this._hasInitialized === false ) await this._renderer.init();
return this._fromTexture( cubemap, renderTarget );
}
/**
* Pre-compiles the cubemap shader. You can get faster start-up by invoking this method during
* your texture's network fetch for increased concurrency.
*
* @returns {Promise}
*/
async compileCubemapShader() {
if ( this._cubemapMaterial === null ) {
this._cubemapMaterial = _getCubemapMaterial();
await this._compileMaterial( this._cubemapMaterial );
}
}
/**
* Pre-compiles the equirectangular shader. You can get faster start-up by invoking this method during
* your texture's network fetch for increased concurrency.
*
* @returns {Promise}
*/
async compileEquirectangularShader() {
if ( this._equirectMaterial === null ) {
this._equirectMaterial = _getEquirectMaterial();
await this._compileMaterial( this._equirectMaterial );
}
}
/**
* Disposes of the PMREMGenerator's internal memory. Note that PMREMGenerator is a static class,
* so you should not need more than one PMREMGenerator object. If you do, calling dispose() on
* one of them will cause any others to also become unusable.
*/
dispose() {
this._dispose();
if ( this._cubemapMaterial !== null ) this._cubemapMaterial.dispose();
if ( this._equirectMaterial !== null ) this._equirectMaterial.dispose();
if ( this._backgroundBox !== null ) {
this._backgroundBox.geometry.dispose();
this._backgroundBox.material.dispose();
}
}
// private interface
_setSizeFromTexture( texture ) {
if ( texture.mapping === CubeReflectionMapping || texture.mapping === CubeRefractionMapping ) {
this._setSize( texture.image.length === 0 ? 16 : ( texture.image[ 0 ].width || texture.image[ 0 ].image.width ) );
} else { // Equirectangular
this._setSize( texture.image.width / 4 );
}
}
_setSize( cubeSize ) {
this._lodMax = Math.floor( Math.log2( cubeSize ) );
this._cubeSize = Math.pow( 2, this._lodMax );
}
_dispose() {
if ( this._blurMaterial !== null ) this._blurMaterial.dispose();
if ( this._pingPongRenderTarget !== null ) this._pingPongRenderTarget.dispose();
for ( let i = 0; i < this._lodPlanes.length; i ++ ) {
this._lodPlanes[ i ].dispose();
}
}
// Restores the renderer state captured in _fromTexture() and resets the
// output target's viewport/scissor to cover the full target.
_cleanup( outputTarget ) {
	// _oldTarget/_oldActiveCubeFace/_oldActiveMipmapLevel are module-level
	// variables saved before generation started.
	this._renderer.setRenderTarget( _oldTarget, _oldActiveCubeFace, _oldActiveMipmapLevel );
	outputTarget.scissorTest = false;
	_setViewport( outputTarget, 0, 0, outputTarget.width, outputTarget.height );
}
_fromTexture( texture, renderTarget ) {
this._setSizeFromTexture( texture );
_oldTarget = this._renderer.getRenderTarget();
_oldActiveCubeFace = this._renderer.getActiveCubeFace();
_oldActiveMipmapLevel = this._renderer.getActiveMipmapLevel();
const cubeUVRenderTarget = renderTarget || this._allocateTargets();
this._textureToCubeUV( texture, cubeUVRenderTarget );
this._applyPMREM( cubeUVRenderTarget );
this._cleanup( cubeUVRenderTarget );
return cubeUVRenderTarget;
}
// Allocates the cubeUV output render target and (when needed) the internal
// ping-pong target, LOD planes, sigmas and blur material.
_allocateTargets() {
	// 3 faces across; minimum width covers the smallest-mip stack (16 * 7).
	const width = 3 * Math.max( this._cubeSize, 16 * 7 );
	// 4 rows: two face rows at mip 0 plus the stacked lower mips.
	const height = 4 * this._cubeSize;
	const params = {
		magFilter: LinearFilter,
		minFilter: LinearFilter,
		generateMipmaps: false,
		type: HalfFloatType,
		format: RGBAFormat,
		colorSpace: LinearSRGBColorSpace,
		//depthBuffer: false
	};
	const cubeUVRenderTarget = _createRenderTarget( width, height, params );
	// Rebuild internal resources only when the size changed (or on first use).
	if ( this._pingPongRenderTarget === null || this._pingPongRenderTarget.width !== width || this._pingPongRenderTarget.height !== height ) {
		if ( this._pingPongRenderTarget !== null ) {
			// Dispose the previously sized blur material / targets / planes.
			this._dispose();
		}
		this._pingPongRenderTarget = _createRenderTarget( width, height, params );
		const { _lodMax } = this;
		// Destructuring assignment into instance fields (parenthesized statement form).
		( { sizeLods: this._sizeLods, lodPlanes: this._lodPlanes, sigmas: this._sigmas, lodMeshes: this._lodMeshes } = _createPlanes( _lodMax ) );
		this._blurMaterial = _getBlurShader( _lodMax, width, height );
	}
	return cubeUVRenderTarget;
}
async _compileMaterial( material ) {
const tmpMesh = new Mesh( this._lodPlanes[ 0 ], material );
await this._renderer.compile( tmpMesh, _flatCamera );
}
_sceneToCubeUV( scene, near, far, cubeUVRenderTarget ) {
const cubeCamera = _cubeCamera;
cubeCamera.near = near;
cubeCamera.far = far;
// px, py, pz, nx, ny, nz
const upSign = [ 1, 1, 1, 1, - 1, 1 ];
const forwardSign = [ 1, - 1, 1, - 1, 1, - 1 ];
const renderer = this._renderer;
const originalAutoClear = renderer.autoClear;
renderer.getClearColor( _clearColor );
renderer.autoClear = false;
let backgroundBox = this._backgroundBox;
if ( backgroundBox === null ) {
const backgroundMaterial = new MeshBasicMaterial( {
name: 'PMREM.Background',
side: BackSide,
depthWrite: false,
depthTest: false
} );
backgroundBox = new Mesh( new BoxGeometry(), backgroundMaterial );
}
let useSolidColor = false;
const background = scene.background;
if ( background ) {
if ( background.isColor ) {
backgroundBox.material.color.copy( background );
scene.background = null;
useSolidColor = true;
}
} else {
backgroundBox.material.color.copy( _clearColor );
useSolidColor = true;
}
renderer.setRenderTarget( cubeUVRenderTarget );
renderer.clear();
if ( useSolidColor ) {
renderer.render( backgroundBox, cubeCamera );
}
for ( let i = 0; i < 6; i ++ ) {
const col = i % 3;
if ( col === 0 ) {
cubeCamera.up.set( 0, upSign[ i ], 0 );
cubeCamera.lookAt( forwardSign[ i ], 0, 0 );
} else if ( col === 1 ) {
cubeCamera.up.set( 0, 0, upSign[ i ] );
cubeCamera.lookAt( 0, forwardSign[ i ], 0 );
} else {
cubeCamera.up.set( 0, upSign[ i ], 0 );
cubeCamera.lookAt( 0, 0, forwardSign[ i ] );
}
const size = this._cubeSize;
_setViewport( cubeUVRenderTarget, col * size, i > 2 ? size : 0, size, size );
renderer.render( scene, cubeCamera );
}
renderer.autoClear = originalAutoClear;
scene.background = background;
}
_textureToCubeUV( texture, cubeUVRenderTarget ) {
const renderer = this._renderer;
const isCubeTexture = ( texture.mapping === CubeReflectionMapping || texture.mapping === CubeRefractionMapping );
if ( isCubeTexture ) {
if ( this._cubemapMaterial === null ) {
this._cubemapMaterial = _getCubemapMaterial( texture );
}
} else {
if ( this._equirectMaterial === null ) {
this._equirectMaterial = _getEquirectMaterial( texture );
}
}
const material = isCubeTexture ? this._cubemapMaterial : this._equirectMaterial;
material.fragmentNode.value = texture;
const mesh = this._lodMeshes[ 0 ];
mesh.material = material;
const size = this._cubeSize;
_setViewport( cubeUVRenderTarget, 0, 0, 3 * size, 2 * size );
renderer.setRenderTarget( cubeUVRenderTarget );
renderer.render( mesh, _flatCamera );
}
_applyPMREM( cubeUVRenderTarget ) {
const renderer = this._renderer;
const autoClear = renderer.autoClear;
renderer.autoClear = false;
const n = this._lodPlanes.length;
for ( let i = 1; i < n; i ++ ) {
const sigma = Math.sqrt( this._sigmas[ i ] * this._sigmas[ i ] - this._sigmas[ i - 1 ] * this._sigmas[ i - 1 ] );
const poleAxis = _axisDirections[ ( n - i - 1 ) % _axisDirections.length ];
this._blur( cubeUVRenderTarget, i - 1, i, sigma, poleAxis );
}
renderer.autoClear = autoClear;
}
/**
* This is a two-pass Gaussian blur for a cubemap. Normally this is done
* vertically and horizontally, but this breaks down on a cube. Here we apply
* the blur latitudinally (around the poles), and then longitudinally (towards
* the poles) to approximate the orthogonally-separable blur. It is least
* accurate at the poles, but still does a decent job.
*
* @param {RenderTarget} cubeUVRenderTarget - The cubemap render target.
* @param {Number} lodIn - The input level-of-detail.
* @param {Number} lodOut - The output level-of-detail.
* @param {Number} sigma - The blur radius in radians.
* @param {Vector3} [poleAxis] - The pole axis.
*/
_blur( cubeUVRenderTarget, lodIn, lodOut, sigma, poleAxis ) {
const pingPongRenderTarget = this._pingPongRenderTarget;
this._halfBlur(
cubeUVRenderTarget,
pingPongRenderTarget,
lodIn,
lodOut,
sigma,
'latitudinal',
poleAxis );
this._halfBlur(
pingPongRenderTarget,
cubeUVRenderTarget,
lodOut,
lodOut,
sigma,
'longitudinal',
poleAxis );
}
// One half of the separable Gaussian blur: samples `targetIn` at `lodIn` and
// writes the blurred result into `targetOut` at `lodOut`, either around the
// poles ('latitudinal') or towards them ('longitudinal').
_halfBlur( targetIn, targetOut, lodIn, lodOut, sigmaRadians, direction, poleAxis ) {
	const renderer = this._renderer;
	const blurMaterial = this._blurMaterial;
	if ( direction !== 'latitudinal' && direction !== 'longitudinal' ) {
		console.error( 'blur direction must be either latitudinal or longitudinal!' );
	}
	// Number of standard deviations at which to cut off the discrete approximation.
	const STANDARD_DEVIATIONS = 3;
	const blurMesh = this._lodMeshes[ lodOut ];
	blurMesh.material = blurMaterial;
	const blurUniforms = _uniformsMap.get( blurMaterial );
	const pixels = this._sizeLods[ lodIn ] - 1;
	// Infinite sigma means "blur everything": spread MAX_SAMPLES over the sphere.
	const radiansPerPixel = isFinite( sigmaRadians ) ? Math.PI / ( 2 * pixels ) : 2 * Math.PI / ( 2 * MAX_SAMPLES - 1 );
	const sigmaPixels = sigmaRadians / radiansPerPixel;
	const samples = isFinite( sigmaRadians ) ? 1 + Math.floor( STANDARD_DEVIATIONS * sigmaPixels ) : MAX_SAMPLES;
	if ( samples > MAX_SAMPLES ) {
		console.warn( `sigmaRadians, ${
			sigmaRadians}, is too large and will clip, as it requested ${
			samples} samples when the maximum is set to ${MAX_SAMPLES}` );
	}
	// Build a normalized half-Gaussian kernel; index 0 is the center tap,
	// every other tap is mirrored (hence the factor 2 in the sum).
	const weights = [];
	let sum = 0;
	for ( let i = 0; i < MAX_SAMPLES; ++ i ) {
		const x = i / sigmaPixels;
		const weight = Math.exp( - x * x / 2 );
		weights.push( weight );
		if ( i === 0 ) {
			sum += weight;
		} else if ( i < samples ) {
			sum += 2 * weight;
		}
	}
	for ( let i = 0; i < weights.length; i ++ ) {
		weights[ i ] = weights[ i ] / sum;
	}
	// Bump the texture frame so downstream caches treat the input as changed.
	targetIn.texture.frame = ( targetIn.texture.frame || 0 ) + 1;
	blurUniforms.envMap.value = targetIn.texture;
	blurUniforms.samples.value = samples;
	blurUniforms.weights.array = weights;
	blurUniforms.latitudinal.value = direction === 'latitudinal' ? 1 : 0;
	if ( poleAxis ) {
		blurUniforms.poleAxis.value = poleAxis;
	}
	const { _lodMax } = this;
	blurUniforms.dTheta.value = radiansPerPixel;
	blurUniforms.mipInt.value = _lodMax - lodIn;
	// Compute the sub-rectangle of the cubeUV atlas that holds `lodOut`.
	const outputSize = this._sizeLods[ lodOut ];
	const x = 3 * outputSize * ( lodOut > _lodMax - LOD_MIN ? lodOut - _lodMax + LOD_MIN : 0 );
	const y = 4 * ( this._cubeSize - outputSize );
	_setViewport( targetOut, x, y, 3 * outputSize, 2 * outputSize );
	renderer.setRenderTarget( targetOut );
	renderer.render( blurMesh, _flatCamera );
}
}
// Builds one flat "unwrapped cube" geometry (plus size/sigma metadata and a
// reusable Mesh) per LOD, from lodMax down to LOD_MIN, followed by the
// EXTRA_LOD_SIGMA levels which reuse the smallest size.
function _createPlanes( lodMax ) {
	const lodPlanes = [];
	const sizeLods = [];
	const sigmas = [];
	const lodMeshes = [];
	let lod = lodMax;
	const totalLods = lodMax - LOD_MIN + 1 + EXTRA_LOD_SIGMA.length;
	for ( let i = 0; i < totalLods; i ++ ) {
		const sizeLod = Math.pow( 2, lod );
		sizeLods.push( sizeLod );
		// Default sigma is one texel; level 0 is unblurred, extra levels use
		// the hand-tuned EXTRA_LOD_SIGMA values.
		let sigma = 1.0 / sizeLod;
		if ( i > lodMax - LOD_MIN ) {
			sigma = EXTRA_LOD_SIGMA[ i - lodMax + LOD_MIN - 1 ];
		} else if ( i === 0 ) {
			sigma = 0;
		}
		sigmas.push( sigma );
		// UVs overshoot [0,1] by one texel so neighboring faces can bleed for filtering.
		const texelSize = 1.0 / ( sizeLod - 2 );
		const min = - texelSize;
		const max = 1 + texelSize;
		const uv1 = [ min, min, max, min, max, max, min, min, max, max, min, max ];
		// Each face is two triangles (6 vertices) of a 3x2 face grid.
		const cubeFaces = 6;
		const vertices = 6;
		const positionSize = 3;
		const uvSize = 2;
		const faceIndexSize = 1;
		const position = new Float32Array( positionSize * vertices * cubeFaces );
		const uv = new Float32Array( uvSize * vertices * cubeFaces );
		const faceIndex = new Float32Array( faceIndexSize * vertices * cubeFaces );
		for ( let face = 0; face < cubeFaces; face ++ ) {
			// Face quad in NDC: columns at x ∈ {-1, -1/3, 1/3}, rows at y ∈ {-1, 0}.
			const x = ( face % 3 ) * 2 / 3 - 1;
			const y = face > 2 ? 0 : - 1;
			const coordinates = [
				x, y, 0,
				x + 2 / 3, y, 0,
				x + 2 / 3, y + 1, 0,
				x, y, 0,
				x + 2 / 3, y + 1, 0,
				x, y + 1, 0
			];
			// _faceLib remaps grid position to the cube face index used by the shader.
			const faceIdx = _faceLib[ face ];
			position.set( coordinates, positionSize * vertices * faceIdx );
			uv.set( uv1, uvSize * vertices * faceIdx );
			const fill = [ faceIdx, faceIdx, faceIdx, faceIdx, faceIdx, faceIdx ];
			faceIndex.set( fill, faceIndexSize * vertices * faceIdx );
		}
		const planes = new BufferGeometry();
		planes.setAttribute( 'position', new BufferAttribute( position, positionSize ) );
		planes.setAttribute( 'uv', new BufferAttribute( uv, uvSize ) );
		planes.setAttribute( 'faceIndex', new BufferAttribute( faceIndex, faceIndexSize ) );
		lodPlanes.push( planes );
		lodMeshes.push( new Mesh( planes, null ) );
		// Extra sigma levels keep reusing the LOD_MIN size.
		if ( lod > LOD_MIN ) {
			lod --;
		}
	}
	return { lodPlanes, sizeLods, sigmas, lodMeshes };
}
// Creates a render target pre-configured for cubeUV PMREM output.
function _createRenderTarget( width, height, params ) {
	const target = new RenderTarget( width, height, params );
	const tex = target.texture;
	tex.mapping = CubeUVReflectionMapping;
	tex.name = 'PMREM.cubeUv';
	tex.isPMREMTexture = true;
	// All PMREM work draws into scissored sub-regions of one large target.
	target.scissorTest = true;
	return target;
}
// Sets viewport and scissor of `target` to the same rectangle, keeping
// draws confined to that sub-region.
function _setViewport( target, x, y, width, height ) {
	const rect = [ x, y, width, height ];
	target.viewport.set( ...rect );
	target.scissor.set( ...rect );
}
// Creates a full-screen helper NodeMaterial: no depth interaction, no blending.
function _getMaterial( type ) {
	const material = new NodeMaterial();
	material.name = `PMREM_${ type }`;
	material.depthTest = false;
	material.depthWrite = false;
	material.blending = NoBlending;
	return material;
}
// Builds the node-based blur material and registers its uniform handles in
// _uniformsMap so _halfBlur() can update them per pass.
function _getBlurShader( lodMax, width, height ) {
	// Gaussian kernel weights, filled in by _halfBlur() each pass.
	const weights = uniformArray( new Array( MAX_SAMPLES ).fill( 0 ) );
	const poleAxis = uniform( new Vector3( 0, 1, 0 ) );
	const dTheta = uniform( 0 );
	const n = float( MAX_SAMPLES );
	const latitudinal = uniform( 0 ); // false, bool
	const samples = uniform( 1 ); // int
	const envMap = texture( null );
	const mipInt = uniform( 0 ); // int
	// Constant texel metrics of the cubeUV atlas, baked into the shader.
	const CUBEUV_TEXEL_WIDTH = float( 1 / width );
	const CUBEUV_TEXEL_HEIGHT = float( 1 / height );
	const CUBEUV_MAX_MIP = float( lodMax );
	const materialUniforms = {
		n,
		latitudinal,
		weights,
		poleAxis,
		outputDirection: _outputDirection,
		dTheta,
		samples,
		envMap,
		mipInt,
		CUBEUV_TEXEL_WIDTH,
		CUBEUV_TEXEL_HEIGHT,
		CUBEUV_MAX_MIP
	};
	const material = _getMaterial( 'blur' );
	// The shader receives `latitudinal` as a boolean node; the raw int uniform
	// stays in materialUniforms so _halfBlur() can write 0/1 into it.
	material.fragmentNode = blur( { ...materialUniforms, latitudinal: latitudinal.equal( 1 ) } );
	_uniformsMap.set( material, materialUniforms );
	return material;
}
// Material that samples a source cube texture along the unwrapped output direction.
function _getCubemapMaterial( envTexture ) {
	const mat = _getMaterial( 'cubemap' );
	mat.fragmentNode = cubeTexture( envTexture, _outputDirection );
	return mat;
}
// Material that samples a source equirectangular texture along the unwrapped
// output direction (mip level 0).
function _getEquirectMaterial( envTexture ) {
	const mat = _getMaterial( 'equirect' );
	mat.fragmentNode = texture( envTexture, equirectUV( _outputDirection ), 0 );
	return mat;
}
// Per-renderer cache of bind groups (see NodeBuilder#getBindGroupsCache).
const rendererCache = new WeakMap();
// Maps typed-array constructors to their shader scalar component type.
const typeFromArray = new Map( [
	[ Int8Array, 'int' ], [ Int16Array, 'int' ], [ Int32Array, 'int' ],
	[ Uint8Array, 'uint' ], [ Uint16Array, 'uint' ], [ Uint32Array, 'uint' ],
	[ Float32Array, 'float' ]
] );
// Formats a numeric value as a shader float literal: integers gain a '.0'
// suffix, scientific notation passes through with any '+' signs stripped.
const toFloat = ( value ) => {
	if ( /e/g.test( value ) ) {
		return String( value ).replace( /\+/g, '' );
	}
	const num = Number( value );
	return num % 1 ? String( num ) : num + '.0';
};
/**
* Base class for builders which generate a shader program based
* on a 3D object and its node material definition.
*/
class NodeBuilder {
/**
* Constructs a new node builder.
*
* @param {Object3D} object - The 3D object.
* @param {Renderer} renderer - The current renderer.
* @param {NodeParser} parser - A reference to a node parser.
*/
constructor( object, renderer, parser ) {
/**
* The 3D object.
*
* @type {Object3D}
*/
this.object = object;
/**
* The material of the 3D object.
*
* @type {Material?}
*/
this.material = ( object && object.material ) || null;
/**
* The geometry of the 3D object.
*
* @type {BufferGeometry?}
*/
this.geometry = ( object && object.geometry ) || null;
/**
* The current renderer.
*
* @type {Renderer}
*/
this.renderer = renderer;
/**
* A reference to a node parser.
*
* @type {NodeParser}
*/
this.parser = parser;
/**
* The scene the 3D object belongs to.
*
* @type {Scene?}
* @default null
*/
this.scene = null;
/**
* The camera the 3D object is rendered with.
*
* @type {Camera?}
* @default null
*/
this.camera = null;
/**
* A list of all nodes the builder is processing
* for this 3D object.
*
* @type {Array<Node>}
*/
this.nodes = [];
/**
* A list of all sequential nodes.
*
* @type {Array<Node>}
*/
this.sequentialNodes = [];
/**
* A list of all nodes which {@link Node#update} method should be executed.
*
* @type {Array<Node>}
*/
this.updateNodes = [];
/**
* A list of all nodes which {@link Node#updateBefore} method should be executed.
*
* @type {Array<Node>}
*/
this.updateBeforeNodes = [];
/**
* A list of all nodes which {@link Node#updateAfter} method should be executed.
*
* @type {Array<Node>}
*/
this.updateAfterNodes = [];
/**
* A dictionary that assigns each node to a unique hash.
*
* @type {Object<Number,Node>}
*/
this.hashNodes = {};
/**
* A reference to a node material observer.
*
* @type {NodeMaterialObserver?}
* @default null
*/
this.observer = null;
/**
* A reference to the current lights node.
*
* @type {LightsNode?}
* @default null
*/
this.lightsNode = null;
/**
* A reference to the current environment node.
*
* @type {Node?}
* @default null
*/
this.environmentNode = null;
/**
* A reference to the current fog node.
*
* @type {FogNode?}
* @default null
*/
this.fogNode = null;
/**
* The current clipping context.
*
* @type {ClippingContext?}
*/
this.clippingContext = null;
/**
* The generated vertex shader.
*
* @type {String?}
*/
this.vertexShader = null;
/**
* The generated fragment shader.
*
* @type {String?}
*/
this.fragmentShader = null;
/**
* The generated compute shader.
*
* @type {String?}
*/
this.computeShader = null;
/**
* Nodes used in the primary flow of code generation.
*
* @type {Object<String,Array<Node>>}
*/
this.flowNodes = { vertex: [], fragment: [], compute: [] };
/**
* Nodes code from `.flowNodes`.
*
* @type {Object<String,String>}
*/
this.flowCode = { vertex: '', fragment: '', compute: '' };
/**
* This dictionary holds the node uniforms of the builder.
* The uniforms are maintained in an array for each shader stage.
*
* @type {Object}
*/
this.uniforms = { vertex: [], fragment: [], compute: [], index: 0 };
/**
* This dictionary holds the output structs of the builder.
* The structs are maintained in an array for each shader stage.
*
* @type {Object}
*/
this.structs = { vertex: [], fragment: [], compute: [], index: 0 };
/**
* This dictionary holds the bindings for each shader stage.
*
* @type {Object}
*/
this.bindings = { vertex: {}, fragment: {}, compute: {} };
/**
* This dictionary maintains the binding indices per bind group.
*
* @type {Object}
*/
this.bindingsIndexes = {};
/**
* Reference to the array of bind groups.
*
* @type {Array<BindGroup>?}
*/
this.bindGroups = null;
/**
* This array holds the node attributes of this builder
* created via {@link AttributeNode}.
*
* @type {Array<NodeAttribute>}
*/
this.attributes = [];
/**
* This array holds the node attributes of this builder
* created via {@link BufferAttributeNode}.
*
* @type {Array<NodeAttribute>}
*/
this.bufferAttributes = [];
/**
* This array holds the node varyings of this builder.
*
* @type {Array<NodeVarying>}
*/
this.varyings = [];
/**
* This dictionary holds the (native) node codes of this builder.
* The codes are maintained in an array for each shader stage.
*
* @type {Object<String,Array<NodeCode>>}
*/
this.codes = {};
/**
* This dictionary holds the node variables of this builder.
* The variables are maintained in an array for each shader stage.
*
* @type {Object<String,Array<NodeVar>>}
*/
this.vars = {};
/**
* Current code flow.
* All code generated in this stack will be stored in `.flow`.
*
* @type {{code: String}}
*/
this.flow = { code: '' };
/**
* A chain of nodes.
* Used to check recursive calls in node-graph.
*
* @type {Array<Node>}
*/
this.chaining = [];
/**
* The current stack.
* This reflects the current process in the code block hierarchy,
* it is useful to know if the current process is inside a conditional for example.
*
* @type {StackNode}
*/
this.stack = stack();
/**
* List of stack nodes.
* The current stack hierarchy is stored in an array.
*
* @type {Array<StackNode>}
*/
this.stacks = [];
/**
* A tab value. Used for shader string generation.
*
* @type {String}
* @default '\t'
*/
this.tab = '\t';
/**
* Reference to the current function node.
*
* @type {FunctionNode?}
* @default null
*/
this.currentFunctionNode = null;
/**
* The builder's context.
*
* @type {Object}
*/
this.context = {
material: this.material
};
/**
* The builder's cache.
*
* @type {NodeCache}
*/
this.cache = new NodeCache();
/**
* Since the {@link NodeBuilder#cache} might be temporarily
* overwritten by other caches, this member retains the reference
* to the builder's own cache.
*
* @type {NodeCache}
* @default this.cache
*/
this.globalCache = this.cache;
this.flowsData = new WeakMap();
/**
* The current shader stage.
*
* @type {('vertex'|'fragment'|'compute'|'any')?}
*/
this.shaderStage = null;
/**
* The current build stage.
*
* @type {('setup'|'analyze'|'generate')?}
*/
this.buildStage = null;
/**
* Whether comparison in shader code are generated with methods or not.
*
* @type {Boolean}
* @default false
*/
this.useComparisonMethod = false;
}
/**
* Returns the bind groups of the current renderer.
*
* @return {ChainMap} The cache.
*/
getBindGroupsCache() {
let bindGroupsCache = rendererCache.get( this.renderer );
if ( bindGroupsCache === undefined ) {
bindGroupsCache = new ChainMap();
rendererCache.set( this.renderer, bindGroupsCache );
}
return bindGroupsCache;
}
/**
* Factory method for creating an instance of {@link RenderTarget} with the given
* dimensions and options.
*
* @param {Number} width - The width of the render target.
* @param {Number} height - The height of the render target.
* @param {Object} options - The options of the render target.
* @return {RenderTarget} The render target.
*/
createRenderTarget( width, height, options ) {
return new RenderTarget( width, height, options );
}
/**
* Factory method for creating an instance of {@link CubeRenderTarget} with the given
* dimensions and options.
*
* @param {Number} size - The size of the cube render target.
* @param {Object} options - The options of the cube render target.
* @return {CubeRenderTarget} The cube render target.
*/
createCubeRenderTarget( size, options ) {
return new CubeRenderTarget( size, options );
}
/**
* Factory method for creating an instance of {@link PMREMGenerator}.
*
* @return {PMREMGenerator} The PMREM generator.
*/
createPMREMGenerator() {
// TODO: Move Materials.js to outside of the Nodes.js in order to remove this function and improve tree-shaking support
return new PMREMGenerator( this.renderer );
}
/**
* Whether the given node is included in the internal array of nodes or not.
*
* @param {Node} node - The node to test.
* @return {Boolean} Whether the given node is included in the internal array of nodes or not.
*/
includes( node ) {
return this.nodes.includes( node );
}
/**
* Returns the output struct name which is required by
* {@link module:OutputStructNode}.
*
* @abstract
* @return {String} The name of the output struct.
*/
getOutputStructName() {} // abstract — concrete backend builders provide the name
/**
* Returns a bind group for the given group name and binding.
*
* @private
* @param {String} groupName - The group name.
* @param {Array<NodeUniformsGroup>} bindings - List of bindings.
* @return {BindGroup} The bind group
*/
_getBindGroup( groupName, bindings ) {
	const bindGroupsCache = this.getBindGroupsCache();
	//
	const bindingsArray = [];
	let sharedGroup = true;
	for ( const binding of bindings ) {
		bindingsArray.push( binding );
		// NOTE(review): `sharedGroup` ends up true only when NO binding's groupNode
		// is flagged shared — i.e. it marks cache-eligibility; the name reads
		// counter-intuitively. Confirm intended semantics upstream.
		sharedGroup = sharedGroup && binding.groupNode.shared !== true;
	}
	//
	let bindGroup;
	if ( sharedGroup ) {
		// Cache-eligible: reuse a bind group previously created for the same
		// binding chain (ChainMap keys on the array's element identities).
		bindGroup = bindGroupsCache.get( bindingsArray );
		if ( bindGroup === undefined ) {
			bindGroup = new BindGroup( groupName, bindingsArray, this.bindingsIndexes[ groupName ].group, bindingsArray );
			bindGroupsCache.set( bindingsArray, bindGroup );
		}
	} else {
		// Shared groups bypass the cache and always get a fresh BindGroup.
		bindGroup = new BindGroup( groupName, bindingsArray, this.bindingsIndexes[ groupName ].group, bindingsArray );
	}
	return bindGroup;
}
/**
* Returns an array of node uniform groups for the given group name and shader stage.
*
* @param {String} groupName - The group name.
* @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
* @return {Array<NodeUniformsGroup>} The array of node uniform groups.
*/
getBindGroupArray( groupName, shaderStage ) {
const bindings = this.bindings[ shaderStage ];
let bindGroup = bindings[ groupName ];
if ( bindGroup === undefined ) {
if ( this.bindingsIndexes[ groupName ] === undefined ) {
this.bindingsIndexes[ groupName ] = { binding: 0, group: Object.keys( this.bindingsIndexes ).length };
}
bindings[ groupName ] = bindGroup = [];
}
return bindGroup;
}
/**
* Returns a list bindings of all shader stages separated by groups.
*
* @return {Array<BindGroup>} The list of bindings.
*/
getBindings() {
let bindingsGroups = this.bindGroups;
if ( bindingsGroups === null ) {
const groups = {};
const bindings = this.bindings;
for ( const shaderStage of shaderStages ) {
for ( const groupName in bindings[ shaderStage ] ) {
const uniforms = bindings[ shaderStage ][ groupName ];
const groupUniforms = groups[ groupName ] || ( groups[ groupName ] = [] );
groupUniforms.push( ...uniforms );
}
}
bindingsGroups = [];
for ( const groupName in groups ) {
const group = groups[ groupName ];
const bindingsGroup = this._getBindGroup( groupName, group );
bindingsGroups.push( bindingsGroup );
}
this.bindGroups = bindingsGroups;
}
return bindingsGroups;
}
/**
* Sorts the bind groups and updates {@link NodeBuilder#bindingsIndexes}.
*/
sortBindingGroups() {
const bindingsGroups = this.getBindings();
bindingsGroups.sort( ( a, b ) => ( a.bindings[ 0 ].groupNode.order - b.bindings[ 0 ].groupNode.order ) );
for ( let i = 0; i < bindingsGroups.length; i ++ ) {
const bindingGroup = bindingsGroups[ i ];
this.bindingsIndexes[ bindingGroup.name ].group = i;
bindingGroup.index = i;
}
}
/**
* The builder maintains each node in a hash-based dictionary.
* This method sets the given node (value) with the given hash (key) into this dictionary.
*
* @param {Node} node - The node to add.
* @param {Number} hash - The hash of the node.
*/
setHashNode( node, hash ) {
	this.hashNodes[ hash ] = node; // last writer wins for a given hash
}
/**
* Adds a node to this builder.
*
* @param {Node} node - The node to add.
*/
addNode( node ) {
if ( this.nodes.includes( node ) === false ) {
this.nodes.push( node );
this.setHashNode( node, node.getHash( this ) );
}
}
/**
* It is used to add Nodes that will be used as FRAME and RENDER events,
* and need to follow a certain sequence in the calls to work correctly.
* This function should be called after 'setup()' in the 'build()' process to ensure that the child nodes are processed first.
*
* @param {Node} node - The node to add.
*/
addSequentialNode( node ) {
if ( this.sequentialNodes.includes( node ) === false ) {
this.sequentialNodes.push( node );
}
}
/**
* Checks the update types of nodes
*/
buildUpdateNodes() {
for ( const node of this.nodes ) {
const updateType = node.getUpdateType();
if ( updateType !== NodeUpdateType.NONE ) {
this.updateNodes.push( node.getSelf() );
}
}
for ( const node of this.sequentialNodes ) {
const updateBeforeType = node.getUpdateBeforeType();
const updateAfterType = node.getUpdateAfterType();
if ( updateBeforeType !== NodeUpdateType.NONE ) {
this.updateBeforeNodes.push( node.getSelf() );
}
if ( updateAfterType !== NodeUpdateType.NONE ) {
this.updateAfterNodes.push( node.getSelf() );
}
}
}
/**
* A reference the current node which is the
* last node in the chain of nodes.
*
* @type {Node}
*/
get currentNode() {
return this.chaining[ this.chaining.length - 1 ];
}
/**
* Whether the given texture is filtered or not.
*
* @param {Texture} texture - The texture to check.
* @return {Boolean} Whether the given texture is filtered or not.
*/
isFilteredTexture( texture ) {
return ( texture.magFilter === LinearFilter || texture.magFilter === LinearMipmapNearestFilter || texture.magFilter === NearestMipmapLinearFilter || texture.magFilter === LinearMipmapLinearFilter ||
texture.minFilter === LinearFilter || texture.minFilter === LinearMipmapNearestFilter || texture.minFilter === NearestMipmapLinearFilter || texture.minFilter === LinearMipmapLinearFilter );
}
/**
* Adds the given node to the internal node chain.
* This is used to check recursive calls in node-graph.
*
* @param {Node} node - The node to add.
*/
addChain( node ) {
/*
if ( this.chaining.indexOf( node ) !== - 1 ) {
console.warn( 'Recursive node: ', node );
}
*/
this.chaining.push( node );
}
/**
* Removes the given node from the internal node chain.
*
* @param {Node} node - The node to remove.
*/
removeChain( node ) {
const lastChain = this.chaining.pop();
if ( lastChain !== node ) {
throw new Error( 'NodeBuilder: Invalid node chaining!' );
}
}
/**
* Returns the native shader method name for a given generic name. E.g.
* the method name `textureDimensions` matches the WGSL name but must be
* resolved to `textureSize` in GLSL.
*
* @abstract
* @param {String} method - The method name to resolve.
* @return {String} The resolved method name.
*/
getMethod( method ) {
	// Base implementation: assume the generic name is already the native one.
	return method;
}
/**
* Returns a node for the given hash, see {@link NodeBuilder#setHashNode}.
*
* @param {Number} hash - The hash of the node.
* @return {Node} The found node.
*/
getNodeFromHash( hash ) {
	return this.hashNodes[ hash ]; // undefined if no node was registered for this hash
}
/**
* Adds the Node to a target flow so that it can generate code in the 'generate' process.
*
* @param {('vertex'|'fragment'|'compute')} shaderStage - The shader stage.
* @param {Node} node - The node to add.
* @return {Node} The node.
*/
addFlow( shaderStage, node ) {
this.flowNodes[ shaderStage ].push( node );
return node;
}
/**
* Sets builder's context.
*
* @param {Object} context - The context to set.
*/
setContext( context ) {
	this.context = context; // replaces the whole context object, no merging
}
/**
* Returns the builder's current context.
*
* @return {Object} The builder's current context.
*/
getContext() {
	return this.context; // live reference, not a copy
}
/**
* Gets a context used in shader construction that can be shared across different materials.
* This is necessary since the renderer cache can reuse shaders generated in one material and use them in another.
*
* @return {Object} The builder's current context without material.
*/
getSharedContext() {
({ ...this.context });
return this.context;
}
/**
* Sets builder's cache.
*
* @param {NodeCache} cache - The cache to set.
*/
setCache( cache ) {
	// Swaps the active cache; the builder's own cache stays reachable via globalCache.
	this.cache = cache;
}
/**
* Returns the builder's current cache.
*
* @return {NodeCache} The builder's current cache.
*/
getCache() {
	return this.cache; // the currently active (possibly swapped-in) cache
}
/**
* Returns a cache for the given node.
*
* @param {Node} node - The node.
* @param {Boolean} [parent=true] - Whether this node refers to a shared parent cache or not.
* @return {NodeCache} The cache.
*/
getCacheFromNode( node, parent = true ) {
const data = this.getDataFromNode( node );
if ( data.cache === undefined ) data.cache = new NodeCache( parent ? this.getCache() : null );
return data.cache;
}
/**
* Whether the requested feature is available or not.
*
* @abstract
* @param {String} name - The requested feature.
* @return {Boolean} Whether the requested feature is supported or not.
*/
isAvailable( /*name*/ ) {
	// Base builder advertises no optional features; backends override this.
	return false;
}
/**
* Returns the vertexIndex input variable as a native shader string.
*
* @abstract
* @return {String} The vertexIndex shader string.
*/
getVertexIndex() {
	// Abstract: concrete backend builders return the native snippet.
	console.warn( 'Abstract function.' );
}
/**
* Returns the instanceIndex input variable as a native shader string.
*
* @abstract
* @return {String} The instanceIndex shader string.
*/
getInstanceIndex() {
	// Abstract: concrete backend builders return the native snippet.
	console.warn( 'Abstract function.' );
}
/**
* Returns the drawIndex input variable as a native shader string.
* Only relevant for WebGL and its `WEBGL_multi_draw` extension.
*
* @abstract
* @return {String} The drawIndex shader string.
*/
getDrawIndex() {
	// Abstract: concrete backend builders return the native snippet.
	console.warn( 'Abstract function.' );
}
/**
* Returns the frontFacing input variable as a native shader string.
*
* @abstract
* @return {String} The frontFacing shader string.
*/
getFrontFacing() {
	// Abstract: concrete backend builders return the native snippet.
	console.warn( 'Abstract function.' );
}
/**
* Returns the fragCoord input variable as a native shader string.
*
* @abstract
* @return {String} The fragCoord shader string.
*/
getFragCoord() {
	// Abstract: concrete backend builders return the native snippet.
	console.warn( 'Abstract function.' );
}
/**
* Whether to flip texture data along its vertical axis or not. WebGL needs
* this method evaluate to `true`, WebGPU to `false`.
*
* @abstract
* @return {Boolean} Whether to flip texture data along its vertical axis or not.
*/
isFlipY() {
	// Default: no vertical flip. Backends that need flipped texture data
	// (see the JSDoc above) override this to return true.
	return false;
}
/**
* Calling this method increases the usage count for the given node by one.
*
* @param {Node} node - The node to increase the usage count for.
* @return {Number} The updated usage count.
*/
increaseUsage( node ) {
const nodeData = this.getDataFromNode( node );
nodeData.usageCount = nodeData.usageCount === undefined ? 1 : nodeData.usageCount + 1;
return nodeData.usageCount;
}
/**
 * Generates a texture sample shader string for the given texture data.
 *
 * @abstract
 * @param {Texture} texture - The texture.
 * @param {String} textureProperty - The texture property name.
 * @param {String} uvSnippet - Snippet defining the texture coordinates.
 * @return {String} The generated shader string.
 */
generateTexture( /* texture, textureProperty, uvSnippet */ ) {
	console.warn( 'Abstract function.' );
}
/**
 * Generates a texture LOD shader string for the given texture data.
 *
 * @abstract
 * @param {Texture} texture - The texture.
 * @param {String} textureProperty - The texture property name.
 * @param {String} uvSnippet - Snippet defining the texture coordinates.
 * @param {String?} depthSnippet - Snippet defining the 0-based texture array index to sample.
 * @param {String} levelSnippet - Snippet defining the mip level.
 * @return {String} The generated shader string.
 */
generateTextureLod( /* texture, textureProperty, uvSnippet, depthSnippet, levelSnippet */ ) {
	console.warn( 'Abstract function.' );
}
/**
* Generates the array declaration string.
*
* @param {String} type - The type.
* @param {Number?} [count] - The count.
* @return {String} The generated value as a shader string.
*/
generateArrayDeclaration( type, count ) {
return this.getType( type ) + '[ ' + count + ' ]';
}
/**
* Generates the array shader string for the given type and value.
*
* @param {String} type - The type.
* @param {Number?} [count] - The count.
* @param {Array<Node>?} [values=null] - The default values.
* @return {String} The generated value as a shader string.
*/
generateArray( type, count, values = null ) {
let snippet = this.generateArrayDeclaration( type, count ) + '( ';
for ( let i = 0; i < count; i ++ ) {
const value = values ? values[ i ] : null;
if ( value !== null ) {
snippet += value.build( this, type );
} else {
snippet += this.generateConst( type );
}
if ( i < count - 1 ) snippet += ', ';
}
snippet += ' )';
return snippet;
}
/**
* Generates the struct shader string.
*
* @param {String} type - The type.
* @param {Array<Object>} [membersLayout] - The count.
* @param {Array<Node>?} [values=null] - The default values.
* @return {String} The generated value as a shader string.
*/
generateStruct( type, membersLayout, values = null ) {
const snippets = [];
for ( const member of membersLayout ) {
const { name, type } = member;
if ( values && values[ name ] && values[ name ].isNode ) {
snippets.push( values[ name ].build( this, type ) );
} else {
snippets.push( this.generateConst( type ) );
}
}
return type + '( ' + snippets.join( ', ' ) + ' )';
}
/**
 * Generates the shader string for the given type and value.
 *
 * @param {String} type - The type.
 * @param {Any?} [value=null] - The value. When `null`, a zero-like default is used.
 * @return {String} The generated value as a shader string.
 */
generateConst( type, value = null ) {
	// substitute a zero-like default per type when no value is given
	if ( value === null ) {
		if ( type === 'float' || type === 'int' || type === 'uint' ) value = 0;
		else if ( type === 'bool' ) value = false;
		else if ( type === 'color' ) value = new Color();
		else if ( type === 'vec2' ) value = new Vector2();
		else if ( type === 'vec3' ) value = new Vector3();
		else if ( type === 'vec4' ) value = new Vector4();
	}
	if ( type === 'float' ) return toFloat( value );
	if ( type === 'int' ) return `${ Math.round( value ) }`;
	// negative values clamp to '0u' since unsigned ints cannot be negative
	if ( type === 'uint' ) return value >= 0 ? `${ Math.round( value ) }u` : '0u';
	if ( type === 'bool' ) return value ? 'true' : 'false';
	if ( type === 'color' ) return `${ this.getType( 'vec3' ) }( ${ toFloat( value.r ) }, ${ toFloat( value.g ) }, ${ toFloat( value.b ) } )`;
	const typeLength = this.getTypeLength( type );
	const componentType = this.getComponentType( type );
	// recursive helper: format each component with the component type
	const generateConst = value => this.generateConst( componentType, value );
	if ( typeLength === 2 ) {
		return `${ this.getType( type ) }( ${ generateConst( value.x ) }, ${ generateConst( value.y ) } )`;
	} else if ( typeLength === 3 ) {
		return `${ this.getType( type ) }( ${ generateConst( value.x ) }, ${ generateConst( value.y ) }, ${ generateConst( value.z ) } )`;
	} else if ( typeLength === 4 ) {
		return `${ this.getType( type ) }( ${ generateConst( value.x ) }, ${ generateConst( value.y ) }, ${ generateConst( value.z ) }, ${ generateConst( value.w ) } )`;
	} else if ( typeLength > 4 && value && ( value.isMatrix3 || value.isMatrix4 ) ) {
		return `${ this.getType( type ) }( ${ value.elements.map( generateConst ).join( ', ' ) } )`;
	} else if ( typeLength > 4 ) {
		// matrix type without a matrix value: emit the type's default constructor
		return `${ this.getType( type ) }()`;
	}
	throw new Error( `NodeBuilder: Type '${type}' not found in generate constant attempt.` );
}
/**
* It might be necessary to convert certain data types to different ones
* so this method can be used to hide the conversion.
*
* @param {String} type - The type.
* @return {String} The updated type.
*/
getType( type ) {
if ( type === 'color' ) return 'vec3';
return type;
}
/**
* Whether the given attribute name is defined in the geometry or not.
*
* @param {String} name - The attribute name.
* @return {Boolean} Whether the given attribute name is defined in the geometry.
*/
hasGeometryAttribute( name ) {
return this.geometry && this.geometry.getAttribute( name ) !== undefined;
}
/**
* Returns a node attribute for the given name and type.
*
* @param {String} name - The attribute's name.
* @param {String} type - The attribute's type.
* @return {NodeAttribute} The node attribute.
*/
getAttribute( name, type ) {
const attributes = this.attributes;
// find attribute
for ( const attribute of attributes ) {
if ( attribute.name === name ) {
return attribute;
}
}
// create a new if no exist
const attribute = new NodeAttribute( name, type );
attributes.push( attribute );
return attribute;
}
/**
 * Returns for the given node and shader stage the property name for the shader.
 *
 * The base implementation simply uses the node's name; backends may
 * override this to apply stage-specific prefixes.
 *
 * @param {Node} node - The node.
 * @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
 * @return {String} The property name.
 */
getPropertyName( node/*, shaderStage*/ ) {
	return node.name;
}
/**
 * Whether the given type is a vector type or not. Matches any type
 * containing `vec` followed by a digit (e.g. `vec3`, `ivec2`).
 *
 * @param {String} type - The type to check.
 * @return {Boolean} Whether the given type is a vector type or not.
 */
isVector( type ) {
	return /vec\d/.test( type );
}
/**
 * Whether the given type is a matrix type or not. Matches any type
 * containing `mat` followed by a digit (e.g. `mat3`, `mat4`).
 *
 * @param {String} type - The type to check.
 * @return {Boolean} Whether the given type is a matrix type or not.
 */
isMatrix( type ) {
	return /mat\d/.test( type );
}
/**
* Whether the given type is a reference type or not.
*
* @param {String} type - The type to check.
* @return {Boolean} Whether the given type is a reference type or not.
*/
isReference( type ) {
return type === 'void' || type === 'property' || type === 'sampler' || type === 'texture' || type === 'cubeTexture' || type === 'storageTexture' || type === 'depthTexture' || type === 'texture3D';
}
/**
 * Checks if the given texture requires a manual conversion to the working color space.
 *
 * The base implementation returns `false`; backends override this where needed.
 *
 * @abstract
 * @param {Texture} texture - The texture to check.
 * @return {Boolean} Whether the given texture requires a conversion to working color space or not.
 */
needsToWorkingColorSpace( /*texture*/ ) {
	return false;
}
/**
* Returns the component type of a given texture.
*
* @param {Texture} texture - The texture.
* @return {String} The component type.
*/
getComponentTypeFromTexture( texture ) {
const type = texture.type;
if ( texture.isDataTexture ) {
if ( type === IntType ) return 'int';
if ( type === UnsignedIntType ) return 'uint';
}
return 'float';
}
/**
* Returns the element type for a given type.
*
* @param {String} type - The type.
* @return {String} The element type.
*/
getElementType( type ) {
if ( type === 'mat2' ) return 'vec2';
if ( type === 'mat3' ) return 'vec3';
if ( type === 'mat4' ) return 'vec4';
return this.getComponentType( type );
}
/**
* Returns the component type for a given type.
*
* @param {String} type - The type.
* @return {String} The component type.
*/
getComponentType( type ) {
type = this.getVectorType( type );
if ( type === 'float' || type === 'bool' || type === 'int' || type === 'uint' ) return type;
const componentType = /(b|i|u|)(vec|mat)([2-4])/.exec( type );
if ( componentType === null ) return null;
if ( componentType[ 1 ] === 'b' ) return 'bool';
if ( componentType[ 1 ] === 'i' ) return 'int';
if ( componentType[ 1 ] === 'u' ) return 'uint';
return 'float';
}
/**
* Returns the vector type for a given type.
*
* @param {String} type - The type.
* @return {String} The vector type.
*/
getVectorType( type ) {
if ( type === 'color' ) return 'vec3';
if ( type === 'texture' || type === 'cubeTexture' || type === 'storageTexture' || type === 'texture3D' ) return 'vec4';
return type;
}
/**
* Returns the data type for the given the length and component type.
*
* @param {Number} length - The length.
* @param {String} [componentType='float'] - The component type.
* @return {String} The type.
*/
getTypeFromLength( length, componentType = 'float' ) {
if ( length === 1 ) return componentType;
let baseType = getTypeFromLength( length );
const prefix = componentType === 'float' ? '' : componentType[ 0 ];
// fix edge case for mat2x2 being same size as vec4
if ( /mat2/.test( componentType ) === true ) {
baseType = baseType.replace( 'vec', 'mat' );
}
return prefix + baseType;
}
/**
 * Returns the type for a given typed array, via a module-level lookup
 * table keyed by the array's constructor.
 *
 * @param {TypedArray} array - The typed array.
 * @return {String} The type.
 */
getTypeFromArray( array ) {
	return typeFromArray.get( array.constructor );
}
/**
* Returns the type for a given buffer attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
* @return {String} The type.
*/
getTypeFromAttribute( attribute ) {
let dataAttribute = attribute;
if ( attribute.isInterleavedBufferAttribute ) dataAttribute = attribute.data;
const array = dataAttribute.array;
const itemSize = attribute.itemSize;
const normalized = attribute.normalized;
let arrayType;
if ( ! ( attribute instanceof Float16BufferAttribute ) && normalized !== true ) {
arrayType = this.getTypeFromArray( array );
}
return this.getTypeFromLength( itemSize, arrayType );
}
/**
* Returns the length for the given data type.
*
* @param {String} type - The data type.
* @return {Number} The length.
*/
getTypeLength( type ) {
const vecType = this.getVectorType( type );
const vecNum = /vec([2-4])/.exec( vecType );
if ( vecNum !== null ) return Number( vecNum[ 1 ] );
if ( vecType === 'float' || vecType === 'bool' || vecType === 'int' || vecType === 'uint' ) return 1;
if ( /mat2/.test( type ) === true ) return 4;
if ( /mat3/.test( type ) === true ) return 9;
if ( /mat4/.test( type ) === true ) return 16;
return 0;
}
/**
 * Returns the vector type for a given matrix type by swapping the
 * `mat` prefix for `vec` (e.g. `mat3` -> `vec3`).
 *
 * @param {String} type - The matrix type.
 * @return {String} The vector type.
 */
getVectorFromMatrix( type ) {
	return type.replace( 'mat', 'vec' );
}
/**
 * For a given type this method changes the component type to the
 * given value. E.g. `vec4` should be changed to the new component type
 * `uint` which results in `uvec4`.
 *
 * @param {String} type - The type.
 * @param {String} newComponentType - The new component type.
 * @return {String} The new type.
 */
changeComponentType( type, newComponentType ) {
	// rebuild the type from its length with the new component prefix
	return this.getTypeFromLength( this.getTypeLength( type ), newComponentType );
}
/**
* Returns the integer type pendant for the given type.
*
* @param {String} type - The type.
* @return {String} The integer type.
*/
getIntegerType( type ) {
const componentType = this.getComponentType( type );
if ( componentType === 'int' || componentType === 'uint' ) return type;
return this.changeComponentType( type, 'int' );
}
/**
 * Adds a stack node to the internal stack.
 *
 * @return {StackNode} The added stack node.
 */
addStack() {
	// the new stack chains to the current one as its parent
	this.stack = stack( this.stack );
	// remember the TSL-global current stack so removeStack() can restore it
	this.stacks.push( getCurrentStack() || this.stack );
	setCurrentStack( this.stack );
	return this.stack;
}
/**
 * Removes the last stack node from the internal stack.
 *
 * @return {StackNode} The removed stack node.
 */
removeStack() {
	const lastStack = this.stack;
	this.stack = lastStack.parent;
	// restore the TSL-global stack captured by the matching addStack() call
	setCurrentStack( this.stacks.pop() );
	return lastStack;
}
/**
* The builder maintains (cached) data for each node during the building process. This method
* can be used to get these data for a specific shader stage and cache.
*
* @param {Node} node - The node to get the data for.
* @param {('vertex'|'fragment'|'compute'|'any')} [shaderStage=this.shaderStage] - The shader stage.
* @param {NodeCache?} cache - An optional cache.
* @return {Object} The node data.
*/
getDataFromNode( node, shaderStage = this.shaderStage, cache = null ) {
cache = cache === null ? ( node.isGlobal( this ) ? this.globalCache : this.cache ) : cache;
let nodeData = cache.getData( node );
if ( nodeData === undefined ) {
nodeData = {};
cache.setData( node, nodeData );
}
if ( nodeData[ shaderStage ] === undefined ) nodeData[ shaderStage ] = {};
return nodeData[ shaderStage ];
}
/**
* Returns the properties for the given node and shader stage.
*
* @param {Node} node - The node to get the properties for.
* @param {('vertex'|'fragment'|'compute'|'any')} [shaderStage='any'] - The shader stage.
* @return {Object} The node properties.
*/
getNodeProperties( node, shaderStage = 'any' ) {
const nodeData = this.getDataFromNode( node, shaderStage );
return nodeData.properties || ( nodeData.properties = { outputNode: null } );
}
/**
* Returns an instance of {@link NodeAttribute} for the given buffer attribute node.
*
* @param {BufferAttributeNode} node - The buffer attribute node.
* @param {String} type - The node type.
* @return {NodeAttribute} The node attribute.
*/
getBufferAttributeFromNode( node, type ) {
const nodeData = this.getDataFromNode( node );
let bufferAttribute = nodeData.bufferAttribute;
if ( bufferAttribute === undefined ) {
const index = this.uniforms.index ++;
bufferAttribute = new NodeAttribute( 'nodeAttribute' + index, type, node );
this.bufferAttributes.push( bufferAttribute );
nodeData.bufferAttribute = bufferAttribute;
}
return bufferAttribute;
}
/**
* Returns an instance of {@link StructType} for the given output struct node.
*
* @param {OutputStructNode} node - The output struct node.
* @param {Array<Object>} membersLayout - The output struct types.
* @param {String?} [name=null] - The name of the struct.
* @param {('vertex'|'fragment'|'compute'|'any')} [shaderStage=this.shaderStage] - The shader stage.
* @return {StructType} The struct type attribute.
*/
getStructTypeFromNode( node, membersLayout, name = null, shaderStage = this.shaderStage ) {
const nodeData = this.getDataFromNode( node, shaderStage );
let structType = nodeData.structType;
if ( structType === undefined ) {
const index = this.structs.index ++;
if ( name === null ) name = 'StructType' + index;
structType = new StructType( name, membersLayout );
this.structs[ shaderStage ].push( structType );
nodeData.structType = structType;
}
return structType;
}
/**
 * Returns an instance of {@link StructType} for the given output struct node,
 * registered for the fragment stage under the fixed name 'OutputType' and
 * flagged as an output struct.
 *
 * @param {OutputStructNode} node - The output struct node.
 * @param {Array<Object>} membersLayout - The output struct types.
 * @return {StructType} The struct type attribute.
 */
getOutputStructTypeFromNode( node, membersLayout ) {
	const structType = this.getStructTypeFromNode( node, membersLayout, 'OutputType', 'fragment' );
	structType.output = true;
	return structType;
}
/**
* Returns an instance of {@link NodeUniform} for the given uniform node.
*
* @param {UniformNode} node - The uniform node.
* @param {String} type - The uniform type.
* @param {('vertex'|'fragment'|'compute'|'any')} [shaderStage=this.shaderStage] - The shader stage.
* @param {String?} name - The name of the uniform.
* @return {NodeUniform} The node uniform.
*/
getUniformFromNode( node, type, shaderStage = this.shaderStage, name = null ) {
const nodeData = this.getDataFromNode( node, shaderStage, this.globalCache );
let nodeUniform = nodeData.uniform;
if ( nodeUniform === undefined ) {
const index = this.uniforms.index ++;
nodeUniform = new NodeUniform( name || ( 'nodeUniform' + index ), type, node );
this.uniforms[ shaderStage ].push( nodeUniform );
nodeData.uniform = nodeUniform;
}
return nodeUniform;
}
/**
* Returns the array length.
*
* @param {Node} node - The node.
* @return {Number?} The array length.
*/
getArrayCount( node ) {
let count = null;
if ( node.isArrayNode ) count = node.count;
else if ( node.isVarNode && node.node.isArrayNode ) count = node.node.count;
return count;
}
/**
 * Returns an instance of {@link NodeVar} for the given variable node.
 *
 * @param {VarNode} node - The variable node.
 * @param {String?} name - The variable's name; auto-generated when omitted.
 * @param {String} [type=node.getNodeType( this )] - The variable's type.
 * @param {('vertex'|'fragment'|'compute'|'any')} [shaderStage=this.shaderStage] - The shader stage.
 * @param {Boolean} [readOnly=false] - Whether the variable is read-only or not.
 *
 * @return {NodeVar} The node variable.
 */
getVarFromNode( node, name = null, type = node.getNodeType( this ), shaderStage = this.shaderStage, readOnly = false ) {
	const nodeData = this.getDataFromNode( node, shaderStage );
	let nodeVar = nodeData.variable;
	if ( nodeVar === undefined ) {
		// constants and variables use separate id counters, both stored on `this.vars`
		const idNS = readOnly ? '_const' : '_var';
		const vars = this.vars[ shaderStage ] || ( this.vars[ shaderStage ] = [] );
		const id = this.vars[ idNS ] || ( this.vars[ idNS ] = 0 );
		if ( name === null ) {
			// the counter only advances for auto-generated names
			name = ( readOnly ? 'nodeConst' : 'nodeVar' ) + id;
			this.vars[ idNS ] ++;
		}
		//
		const count = this.getArrayCount( node );
		nodeVar = new NodeVar( name, type, readOnly, count );
		if ( ! readOnly ) {
			// read-only vars (constants) are not declared in the stage's var list
			vars.push( nodeVar );
		}
		nodeData.variable = nodeVar;
	}
	return nodeVar;
}
/**
* Returns whether a Node or its flow is deterministic, useful for use in `const`.
*
* @param {Node} node - The varying node.
* @return {Boolean} Returns true if deterministic.
*/
isDeterministic( node ) {
if ( node.isMathNode ) {
return this.isDeterministic( node.aNode ) &&
( node.bNode ? this.isDeterministic( node.bNode ) : true ) &&
( node.cNode ? this.isDeterministic( node.cNode ) : true );
} else if ( node.isOperatorNode ) {
return this.isDeterministic( node.aNode ) &&
( node.bNode ? this.isDeterministic( node.bNode ) : true );
} else if ( node.isArrayNode ) {
if ( node.values !== null ) {
for ( const n of node.values ) {
if ( ! this.isDeterministic( n ) ) {
return false;
}
}
}
return true;
} else if ( node.isConstNode ) {
return true;
}
return false;
}
/**
* Returns an instance of {@link NodeVarying} for the given varying node.
*
* @param {(VaryingNode|PropertyNode)} node - The varying node.
* @param {String?} name - The varying's name.
* @param {String} [type=node.getNodeType( this )] - The varying's type.
* @return {NodeVar} The node varying.
*/
getVaryingFromNode( node, name = null, type = node.getNodeType( this ) ) {
const nodeData = this.getDataFromNode( node, 'any' );
let nodeVarying = nodeData.varying;
if ( nodeVarying === undefined ) {
const varyings = this.varyings;
const index = varyings.length;
if ( name === null ) name = 'nodeVarying' + index;
nodeVarying = new NodeVarying( name, type );
varyings.push( nodeVarying );
nodeData.varying = nodeVarying;
}
return nodeVarying;
}
/**
* Returns an instance of {@link NodeCode} for the given code node.
*
* @param {CodeNode} node - The code node.
* @param {String} type - The node type.
* @param {('vertex'|'fragment'|'compute'|'any')} [shaderStage=this.shaderStage] - The shader stage.
* @return {NodeCode} The node code.
*/
getCodeFromNode( node, type, shaderStage = this.shaderStage ) {
const nodeData = this.getDataFromNode( node );
let nodeCode = nodeData.code;
if ( nodeCode === undefined ) {
const codes = this.codes[ shaderStage ] || ( this.codes[ shaderStage ] = [] );
const index = codes.length;
nodeCode = new NodeCode( 'nodeCode' + index, type );
codes.push( nodeCode );
nodeData.code = nodeCode;
}
return nodeCode;
}
/**
 * Adds a code flow based on the code-block hierarchy.
 * This is used so that code-blocks like If,Else create their variables locally if the Node
 * is only used inside one of these conditionals in the current shader stage.
 *
 * @param {Node} node - The node to add.
 * @param {Node} nodeBlock - Node-based code-block. Usually 'ConditionalNode'.
 */
addFlowCodeHierarchy( node, nodeBlock ) {
	// flowCodes/flowCodeBlock are created by addLineFlowCodeBlock() — assumes it ran for this node
	const { flowCodes, flowCodeBlock } = this.getDataFromNode( node );
	let needsFlowCode = true;
	let nodeBlockHierarchy = nodeBlock;
	// walk up the conditional block chain; if the node already emitted its code
	// into one of the ancestor blocks, it must not be emitted again here
	while ( nodeBlockHierarchy ) {
		if ( flowCodeBlock.get( nodeBlockHierarchy ) === true ) {
			needsFlowCode = false;
			break;
		}
		nodeBlockHierarchy = this.getDataFromNode( nodeBlockHierarchy ).parentNodeBlock;
	}
	if ( needsFlowCode ) {
		for ( const flowCode of flowCodes ) {
			this.addLineFlowCode( flowCode );
		}
	}
}
/**
* Add a inline-code to the current flow code-block.
*
* @param {Node} node - The node to add.
* @param {String} code - The code to add.
* @param {Node} nodeBlock - Current ConditionalNode
*/
addLineFlowCodeBlock( node, code, nodeBlock ) {
const nodeData = this.getDataFromNode( node );
const flowCodes = nodeData.flowCodes || ( nodeData.flowCodes = [] );
const codeBlock = nodeData.flowCodeBlock || ( nodeData.flowCodeBlock = new WeakMap() );
flowCodes.push( code );
codeBlock.set( nodeBlock, true );
}
/**
* Add a inline-code to the current flow.
*
* @param {String} code - The code to add.
* @param {Node?} [node= null] - Optional Node, can help the system understand if the Node is part of a code-block.
* @return {NodeBuilder} A reference to this node builder.
*/
addLineFlowCode( code, node = null ) {
if ( code === '' ) return this;
if ( node !== null && this.context.nodeBlock ) {
this.addLineFlowCodeBlock( node, code, this.context.nodeBlock );
}
code = this.tab + code;
if ( ! /;\s*$/.test( code ) ) {
code = code + ';\n';
}
this.flow.code += code;
return this;
}
/**
 * Adds a code to the current code flow, verbatim (no tabbing or
 * statement termination is applied).
 *
 * @param {String} code - Shader code.
 * @return {NodeBuilder} A reference to this node builder.
 */
addFlowCode( code ) {
	this.flow.code += code;
	return this;
}
/**
 * Add tab in the code that will be generated so that other snippets respect the current tabulation.
 * Typically used in codes with If,Else.
 *
 * @return {NodeBuilder} A reference to this node builder.
 */
addFlowTab() {
	this.tab += '\t';
	return this;
}
/**
 * Removes a tab. Counterpart of {@link NodeBuilder#addFlowTab}.
 *
 * @return {NodeBuilder} A reference to this node builder.
 */
removeFlowTab() {
	// drop the last tab character appended by addFlowTab()
	this.tab = this.tab.slice( 0, - 1 );
	return this;
}
/**
 * Gets the current flow data based on a Node. The data is stored by
 * {@link NodeBuilder#flowNode} during the generate stage.
 *
 * @param {Node} node - Node that the flow was started.
 * @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
 * @return {Object} The flow data.
 */
getFlowData( node/*, shaderStage*/ ) {
	return this.flowsData.get( node );
}
/**
* Executes the node flow based on a root node to generate the final shader code.
*
* @param {Node} node - The node to execute.
* @return {Object} The code flow.
*/
flowNode( node ) {
const output = node.getNodeType( this );
const flowData = this.flowChildNode( node, output );
this.flowsData.set( node, flowData );
return flowData;
}
/**
 * Builds a {@link FunctionNode} from the given shader node by generating
 * its native function code.
 *
 * @param {ShaderNodeInternal} shaderNode - The shader node to build the function node with.
 * @return {FunctionNode} The built function node.
 */
buildFunctionNode( shaderNode ) {
	const fn = new FunctionNode();
	// track the current function node so nested builds know their context
	const previous = this.currentFunctionNode;
	this.currentFunctionNode = fn;
	fn.code = this.buildFunctionCode( shaderNode );
	this.currentFunctionNode = previous;
	return fn;
}
/**
 * Generates a code flow based on a TSL function: Fn().
 *
 * @param {ShaderNodeInternal} shaderNode - A function code will be generated based on the input.
 * @return {Object} The flow data.
 */
flowShaderNode( shaderNode ) {
	const layout = shaderNode.layout;
	// plain object that is also iterable, so shaderNode.call() can consume
	// the parameters either by name or positionally
	const inputs = {
		[ Symbol.iterator ]() {
			let index = 0;
			const values = Object.values( this );
			return {
				next: () => ( {
					value: values[ index ],
					done: index ++ >= values.length
				} )
			};
		}
	};
	// one ParameterNode per declared layout input, keyed by name
	for ( const input of layout.inputs ) {
		inputs[ input.name ] = new ParameterNode( input.type, input.name );
	}
	//
	// temporarily clear the layout so the call is built as a regular node flow
	shaderNode.layout = null;
	const callNode = shaderNode.call( inputs );
	const flowData = this.flowStagesNode( callNode, layout.type );
	shaderNode.layout = layout;
	return flowData;
}
/**
 * Runs the node flow through all the steps of creation, 'setup', 'analyze', 'generate'.
 *
 * @param {Node} node - The node to execute.
 * @param {String?} output - Expected output type. For example 'vec3'.
 * @return {Object} The flow data.
 */
flowStagesNode( node, output = null ) {
	// save the builder state so the isolated sub-flow cannot leak into it
	const previousFlow = this.flow;
	const previousVars = this.vars;
	const previousCache = this.cache;
	const previousBuildStage = this.buildStage;
	const previousStack = this.stack;
	const flow = {
		code: ''
	};
	// fresh flow, vars, cache and stack for the isolated build
	this.flow = flow;
	this.vars = {};
	this.cache = new NodeCache();
	this.stack = stack();
	// run the full build pipeline ('setup' -> 'analyze' -> 'generate') on the node
	for ( const buildStage of defaultBuildStages ) {
		this.setBuildStage( buildStage );
		flow.result = node.build( this, output );
	}
	flow.vars = this.getVars( this.shaderStage );
	// restore the previous builder state
	this.flow = previousFlow;
	this.vars = previousVars;
	this.cache = previousCache;
	this.stack = previousStack;
	this.setBuildStage( previousBuildStage );
	return flow;
}
/**
 * Returns the native shader operator name for a given generic name.
 * It is a similar type of method like {@link NodeBuilder#getMethod}.
 *
 * The base implementation returns `null` (no mapping); backends override it.
 *
 * @abstract
 * @param {String} op - The operator name to resolve.
 * @return {String?} The resolved operator name, or `null`.
 */
getFunctionOperator( /* op */ ) {
	return null;
}
/**
* Generates a code flow based on a child Node.
*
* @param {Node} node - The node to execute.
* @param {String?} output - Expected output type. For example 'vec3'.
* @return {Object} The code flow.
*/
flowChildNode( node, output = null ) {
const previousFlow = this.flow;
const flow = {
code: ''
};
this.flow = flow;
flow.result = node.build( this, output );
this.flow = previousFlow;
return flow;
}
/**
* Executes a flow of code in a different stage.
*
* Some nodes like `varying()` have the ability to compute code in vertex-stage and
* return the value in fragment-stage even if it is being executed in an input fragment.
*
* @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
* @param {Node} node - The node to execute.
* @param {String?} output - Expected output type. For example 'vec3'.
* @param {String?} propertyName - The property name to assign the result.
* @return {Object}
*/
flowNodeFromShaderStage( shaderStage, node, output = null, propertyName = null ) {
const previousShaderStage = this.shaderStage;
this.setShaderStage( shaderStage );
const flowData = this.flowChildNode( node, output );
if ( propertyName !== null ) {
flowData.code += `${ this.tab + propertyName } = ${ flowData.result };\n`;
}
this.flowCode[ shaderStage ] = this.flowCode[ shaderStage ] + flowData.code;
this.setShaderStage( previousShaderStage );
return flowData;
}
/**
* Returns an array holding all node attributes of this node builder.
*
* @return {Array<NodeAttribute>} The node attributes of this builder.
*/
getAttributesArray() {
return this.attributes.concat( this.bufferAttributes );
}
/**
 * Returns the attribute definitions as a shader string for the given shader stage.
 *
 * @abstract
 * @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
 * @return {String} The attribute code section.
 */
getAttributes( /*shaderStage*/ ) {
	console.warn( 'Abstract function.' );
}
/**
 * Returns the varying definitions as a shader string for the given shader stage.
 *
 * @abstract
 * @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
 * @return {String} The varying code section.
 */
getVaryings( /*shaderStage*/ ) {
	console.warn( 'Abstract function.' );
}
/**
* Returns a single variable definition as a shader string for the given variable type and name.
*
* @param {String} type - The variable's type.
* @param {String} name - The variable's name.
* @param {Number?} [count=null] - The array length.
* @return {String} The shader string.
*/
getVar( type, name, count = null ) {
return `${ count !== null ? this.generateArrayDeclaration( type, count ) : this.getType( type ) } ${ name }`;
}
/**
* Returns the variable definitions as a shader string for the given shader stage.
*
* @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
* @return {String} The variable code section.
*/
getVars( shaderStage ) {
let snippet = '';
const vars = this.vars[ shaderStage ];
if ( vars !== undefined ) {
for ( const variable of vars ) {
snippet += `${ this.getVar( variable.type, variable.name ) }; `;
}
}
return snippet;
}
/**
 * Returns the uniform definitions as a shader string for the given shader stage.
 *
 * @abstract
 * @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
 * @return {String} The uniform code section.
 */
getUniforms( /*shaderStage*/ ) {
	console.warn( 'Abstract function.' );
}
/**
* Returns the native code definitions as a shader string for the given shader stage.
*
* @param {('vertex'|'fragment'|'compute'|'any')} shaderStage - The shader stage.
* @return {String} The native code section.
*/
getCodes( shaderStage ) {
const codes = this.codes[ shaderStage ];
let code = '';
if ( codes !== undefined ) {
for ( const nodeCode of codes ) {
code += nodeCode.code + '\n';
}
}
return code;
}
/**
* Returns the hash of this node builder.
*
* @return {String} The hash.
*/
getHash() {
return this.vertexShader + this.fragmentShader + this.computeShader;
}
/**
 * Sets the current shader stage. `null` clears it (used after build()).
 *
 * @param {('vertex'|'fragment'|'compute'|'any')?} shaderStage - The shader stage to set.
 */
setShaderStage( shaderStage ) {
	this.shaderStage = shaderStage;
}
/**
 * Returns the current shader stage.
 *
 * @return {('vertex'|'fragment'|'compute'|'any')?} The current shader stage.
 */
getShaderStage() {
	return this.shaderStage;
}
/**
 * Sets the current build stage. `null` clears it (used after build()).
 *
 * @param {('setup'|'analyze'|'generate')?} buildStage - The build stage to set.
 */
setBuildStage( buildStage ) {
	this.buildStage = buildStage;
}
/**
 * Returns the current build stage.
 *
 * @return {('setup'|'analyze'|'generate')?} The current build stage.
 */
getBuildStage() {
	return this.buildStage;
}
/**
 * Controls the code build of the shader stages. Called at the end of
 * {@link NodeBuilder#build}; backends implement the actual assembly.
 *
 * @abstract
 */
buildCode() {
	console.warn( 'Abstract function.' );
}
/**
 * Central build method which controls the build for the given object.
 *
 * @return {NodeBuilder} A reference to this node builder.
 */
build() {
	const { object, material, renderer } = this;
	if ( material !== null ) {
		// resolve the node material for the given material type via the renderer's library
		let nodeMaterial = renderer.library.fromMaterial( material );
		if ( nodeMaterial === null ) {
			console.error( `NodeMaterial: Material "${ material.type }" is not compatible.` );
			// fall back to a default node material so the build can proceed
			nodeMaterial = new NodeMaterial();
		}
		nodeMaterial.build( this );
	} else {
		// no material: treat the object as a compute flow
		this.addFlow( 'compute', object );
	}
	// setup() -> stage 1: create possible new nodes and returns an output reference node
	// analyze() -> stage 2: analyze nodes to possible optimization and validation
	// generate() -> stage 3: generate shader
	for ( const buildStage of defaultBuildStages ) {
		this.setBuildStage( buildStage );
		if ( this.context.vertex && this.context.vertex.isNode ) {
			// a custom vertex node in the context is flowed in the vertex stage first
			this.flowNodeFromShaderStage( 'vertex', this.context.vertex );
		}
		for ( const shaderStage of shaderStages ) {
			this.setShaderStage( shaderStage );
			const flowNodes = this.flowNodes[ shaderStage ];
			for ( const node of flowNodes ) {
				if ( buildStage === 'generate' ) {
					this.flowNode( node );
				} else {
					node.build( this );
				}
			}
		}
	}
	this.setBuildStage( null );
	this.setShaderStage( null );
	// stage 4: build code for a specific output
	this.buildCode();
	this.buildUpdateNodes();
	return this;
}
/**
* Returns a uniform representation which is later used for UBO generation and rendering.
*
* @param {NodeUniform} uniformNode - The uniform node.
* @param {String} type - The requested type.
* @return {Uniform} The uniform.
*/
getNodeUniform( uniformNode, type ) {
if ( type === 'float' || type === 'int' || type === 'uint' ) return new NumberNodeUniform( uniformNode );
if ( type === 'vec2' || type === 'ivec2' || type === 'uvec2' ) return new Vector2NodeUniform( uniformNode );
if ( type === 'vec3' || type === 'ivec3' || type === 'uvec3' ) return new Vector3NodeUniform( uniformNode );
if ( type === 'vec4' || type === 'ivec4' || type === 'uvec4' ) return new Vector4NodeUniform( uniformNode );
if ( type === 'color' ) return new ColorNodeUniform( uniformNode );
if ( type === 'mat2' ) return new Matrix2NodeUniform( uniformNode );
if ( type === 'mat3' ) return new Matrix3NodeUniform( uniformNode );
if ( type === 'mat4' ) return new Matrix4NodeUniform( uniformNode );
throw new Error( `Uniform "${type}" not declared.` );
}
/**
* Formats the given shader snippet from a given type into another one. E.g.
* this method might be used to convert a simple float string `"1.0"` into a
* `vec3` representation: `"vec3<f32>( 1.0 )"`.
*
* @param {String} snippet - The shader snippet.
* @param {String} fromType - The source type.
* @param {String} toType - The target type.
* @return {String} The updated shader string.
*/
format( snippet, fromType, toType ) {
fromType = this.getVectorType( fromType );
toType = this.getVectorType( toType );
// No conversion needed for identical, unknown or reference types.
if ( fromType === toType || toType === null || this.isReference( toType ) ) {
return snippet;
}
const fromTypeLength = this.getTypeLength( fromType );
const toTypeLength = this.getTypeLength( toType );
// mat4 -> mat3: rebuild from the upper-left 3x3 part.
if ( fromTypeLength === 16 && toTypeLength === 9 ) {
return `${ this.getType( toType ) }(${ snippet }[0].xyz, ${ snippet }[1].xyz, ${ snippet }[2].xyz)`;
}
// mat3 -> mat2: rebuild from the upper-left 2x2 part.
if ( fromTypeLength === 9 && toTypeLength === 4 ) {
return `${ this.getType( toType ) }(${ snippet }[0].xy, ${ snippet }[1].xy)`;
}
if ( fromTypeLength > 4 ) { // fromType is matrix-like
// @TODO: ignore for now
return snippet;
}
if ( toTypeLength > 4 || toTypeLength === 0 ) { // toType is matrix-like or unknown
// @TODO: ignore for now
return snippet;
}
// Same component count: a plain constructor cast is enough.
if ( fromTypeLength === toTypeLength ) {
return `${ this.getType( toType ) }( ${ snippet } )`;
}
// Downcast: swizzle the leading components, then recurse to convert the component type.
if ( fromTypeLength > toTypeLength ) {
return this.format( `${ snippet }.${ 'xyz'.slice( 0, toTypeLength ) }`, this.getTypeFromLength( toTypeLength, this.getComponentType( fromType ) ), toType );
}
// Upcast to vec4: extend via vec3 and append w = 1.0.
if ( toTypeLength === 4 && fromTypeLength > 1 ) { // toType is vec4-like
return `${ this.getType( toType ) }( ${ this.format( snippet, fromType, 'vec3' ) }, 1.0 )`;
}
// Upcast vec2 -> vec3: append z = 0.0.
if ( fromTypeLength === 2 ) { // fromType is vec2-like and toType is vec3-like
return `${ this.getType( toType ) }( ${ this.format( snippet, fromType, 'vec2' ) }, 0.0 )`;
}
// Scalar -> vector with differing component type: cast the scalar first.
if ( fromTypeLength === 1 && toTypeLength > 1 && fromType !== this.getComponentType( toType ) ) { // fromType is float-like
// convert a number value to vector type, e.g:
// vec3( 1u ) -> vec3( float( 1u ) )
snippet = `${ this.getType( this.getComponentType( toType ) ) }( ${ snippet } )`;
}
return `${ this.getType( toType ) }( ${ snippet } )`; // fromType is float-like
}
/**
* Returns a signature with the engine's current revision.
*
* @return {String} The signature.
*/
getSignature() {
// Prepended to generated shaders to identify the engine revision.
return `// Three.js r${ REVISION } - Node System\n`;
}
// Deprecated
/**
* @function
* @deprecated since r168. Use `new NodeMaterial()` instead, with targeted node material name.
*
* @param {String} [type='NodeMaterial'] - The node material type.
* @throws {Error}
*/
createNodeMaterial( type = 'NodeMaterial' ) { // @deprecated, r168
// Kept only to raise a descriptive error for legacy callers.
throw new Error( `THREE.NodeBuilder: createNodeMaterial() was deprecated. Use new ${ type }() instead.` );
}
}
/**
 * Management class for updating nodes. The module tracks metrics like
 * the elapsed time, delta time, the render and frame ID to correctly
 * call the node update methods {@link Node#updateBefore}, {@link Node#update}
 * and {@link Node#updateAfter} depending on the node's configuration.
 */
class NodeFrame {

	/**
	 * Constructs a new node frame.
	 */
	constructor() {

		/**
		 * The elapsed time in seconds.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.time = 0;

		/**
		 * The delta time in seconds.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.deltaTime = 0;

		/**
		 * The frame ID.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.frameId = 0;

		/**
		 * The render ID.
		 *
		 * @type {Number}
		 * @default 0
		 */
		this.renderId = 0;

		/**
		 * Used to control the {@link Node#update} call.
		 *
		 * @type {WeakMap<Node, Object>}
		 */
		this.updateMap = new WeakMap();

		/**
		 * Used to control the {@link Node#updateBefore} call.
		 *
		 * @type {WeakMap<Node, Object>}
		 */
		this.updateBeforeMap = new WeakMap();

		/**
		 * Used to control the {@link Node#updateAfter} call.
		 *
		 * @type {WeakMap<Node, Object>}
		 */
		this.updateAfterMap = new WeakMap();

		/**
		 * A reference to the current renderer.
		 *
		 * @type {Renderer?}
		 * @default null
		 */
		this.renderer = null;

		/**
		 * A reference to the current material.
		 *
		 * @type {Material?}
		 * @default null
		 */
		this.material = null;

		/**
		 * A reference to the current camera.
		 *
		 * @type {Camera?}
		 * @default null
		 */
		this.camera = null;

		/**
		 * A reference to the current 3D object.
		 *
		 * @type {Object3D?}
		 * @default null
		 */
		this.object = null;

		/**
		 * A reference to the current scene.
		 *
		 * @type {Scene?}
		 * @default null
		 */
		this.scene = null;

	}

	/**
	 * Returns a dictionary for a given node and update map which
	 * is used to correctly call node update methods per frame or render.
	 *
	 * @private
	 * @param {WeakMap<Node, Object>} referenceMap - The reference weak map.
	 * @param {Node} nodeRef - The reference to the current node.
	 * @return {Object<String,WeakMap>} The dictionary.
	 */
	_getMaps( referenceMap, nodeRef ) {

		let maps = referenceMap.get( nodeRef );

		if ( maps === undefined ) {

			maps = {
				renderMap: new WeakMap(),
				frameMap: new WeakMap()
			};

			referenceMap.set( nodeRef, maps );

		}

		return maps;

	}

	/**
	 * Shared implementation for the three update entry points. Honors the
	 * given update type: FRAME and RENDER updates run at most once per
	 * frame/render (tracked per update reference), OBJECT updates always run.
	 * An update callback returning `false` is treated as "not done" and will
	 * be retried on the next call.
	 *
	 * @private
	 * @param {Node} node - The node that should be updated.
	 * @param {String} updateType - One of the NodeUpdateType values.
	 * @param {WeakMap<Node, Object>} referenceMap - The bookkeeping map for this update kind.
	 * @param {Function} updateFn - Performs the actual update; receives this frame.
	 */
	_runUpdate( node, updateType, referenceMap, updateFn ) {

		const reference = node.updateReference( this );

		if ( updateType === NodeUpdateType.FRAME ) {

			const { frameMap } = this._getMaps( referenceMap, reference );

			if ( frameMap.get( reference ) !== this.frameId ) {

				if ( updateFn( this ) !== false ) {

					frameMap.set( reference, this.frameId );

				}

			}

		} else if ( updateType === NodeUpdateType.RENDER ) {

			const { renderMap } = this._getMaps( referenceMap, reference );

			if ( renderMap.get( reference ) !== this.renderId ) {

				if ( updateFn( this ) !== false ) {

					renderMap.set( reference, this.renderId );

				}

			}

		} else if ( updateType === NodeUpdateType.OBJECT ) {

			updateFn( this );

		}

	}

	/**
	 * This method executes the {@link Node#updateBefore} for the given node.
	 * It makes sure {@link Node#updateBeforeType} is honored meaning the update
	 * is only executed once per frame, render or object depending on the update
	 * type.
	 *
	 * @param {Node} node - The node that should be updated.
	 */
	updateBeforeNode( node ) {

		this._runUpdate( node, node.getUpdateBeforeType(), this.updateBeforeMap, ( frame ) => node.updateBefore( frame ) );

	}

	/**
	 * This method executes the {@link Node#updateAfter} for the given node.
	 * It makes sure {@link Node#updateAfterType} is honored meaning the update
	 * is only executed once per frame, render or object depending on the update
	 * type.
	 *
	 * @param {Node} node - The node that should be updated.
	 */
	updateAfterNode( node ) {

		this._runUpdate( node, node.getUpdateAfterType(), this.updateAfterMap, ( frame ) => node.updateAfter( frame ) );

	}

	/**
	 * This method executes the {@link Node#update} for the given node.
	 * It makes sure {@link Node#updateType} is honored meaning the update
	 * is only executed once per frame, render or object depending on the update
	 * type.
	 *
	 * @param {Node} node - The node that should be updated.
	 */
	updateNode( node ) {

		this._runUpdate( node, node.getUpdateType(), this.updateMap, ( frame ) => node.update( frame ) );

	}

	/**
	 * Updates the internal state of the node frame. This method is
	 * called by the renderer in its internal animation loop.
	 */
	update() {

		this.frameId ++;

		// Sample the clock once so deltaTime and lastTime agree. The previous
		// implementation called performance.now() twice, letting the sum of
		// deltas drift from the true elapsed time.
		const now = performance.now();

		if ( this.lastTime === undefined ) this.lastTime = now;

		this.deltaTime = ( now - this.lastTime ) / 1000;

		this.lastTime = now;

		this.time += this.deltaTime;

	}

}
/**
 * Describes the input of a {@link NodeFunction}: its type and name plus
 * optional array length and GLSL-only qualifiers.
 */
class NodeFunctionInput {

	/**
	 * Constructs a new node function input.
	 *
	 * @param {String} type - The input type.
	 * @param {String} name - The input name.
	 * @param {Number?} [count=null] - If the input is an Array, count will be the length.
	 * @param {('in'|'out'|'inout')} [qualifier=''] - The parameter qualifier (only relevant for GLSL).
	 * @param {Boolean} [isConst=false] - Whether the input uses a const qualifier or not (only relevant for GLSL).
	 */
	constructor( type, name, count = null, qualifier = '', isConst = false ) {

		// Store the descriptor fields; property order matches the parameter order.
		Object.assign( this, { type, name, count, qualifier, isConst } );

	}

}

NodeFunctionInput.isNodeFunctionInput = true;
/**
 * Module for representing directional lights as nodes.
 *
 * @augments AnalyticLightNode
 */
class DirectionalLightNode extends AnalyticLightNode {

	static get type() {

		return 'DirectionalLightNode';

	}

	/**
	 * Constructs a new directional light node.
	 *
	 * @param {DirectionalLight?} [light=null] - The directional light source.
	 */
	constructor( light = null ) {

		super( light );

	}

	setup( builder ) {

		super.setup( builder );

		const { lightingModel, reflectedLight } = builder.context;

		// A directional light has a constant direction defined by the light's target.
		lightingModel.direct( {
			lightDirection: lightTargetDirection( this.light ),
			lightColor: this.colorNode,
			reflectedLight
		}, builder.stack, builder );

	}

}
// Scratch matrices reused by RectAreaLightNode.update() to avoid per-frame allocations.
const _matrix41 = /*@__PURE__*/ new Matrix4();
const _matrix42 = /*@__PURE__*/ new Matrix4();
// Holds the LTC BRDF approximation textures; assigned via RectAreaLightNode.setLTC().
let _ltcLib = null;
/**
 * Module for representing rect area lights as nodes.
 *
 * @augments AnalyticLightNode
 */
class RectAreaLightNode extends AnalyticLightNode {
static get type() {
return 'RectAreaLightNode';
}
/**
 * Constructs a new rect area light node.
 *
 * @param {RectAreaLight?} [light=null] - The rect area light source.
 */
constructor( light = null ) {
super( light );
/**
 * Uniform node representing the half height of the area light.
 *
 * @type {UniformNode<vec3>}
 */
this.halfHeight = uniform( new Vector3() ).setGroup( renderGroup );
/**
 * Uniform node representing the half width of the area light.
 *
 * @type {UniformNode<vec3>}
 */
this.halfWidth = uniform( new Vector3() ).setGroup( renderGroup );
/**
 * The `updateType` is set to `NodeUpdateType.RENDER` since the light
 * relies on `viewMatrix` which might vary per render call.
 *
 * @type {String}
 * @default 'render'
 */
this.updateType = NodeUpdateType.RENDER;
}
/**
 * Overwritten to update rect area light specific uniforms.
 *
 * @param {NodeFrame} frame - A reference to the current node frame.
 */
update( frame ) {
super.update( frame );
const { light } = this;
const viewMatrix = frame.camera.matrixWorldInverse;
// Extract the view-space rotation of the light (no translation) so the
// half-extent vectors can be oriented accordingly.
_matrix42.identity();
_matrix41.copy( light.matrixWorld );
_matrix41.premultiply( viewMatrix );
_matrix42.extractRotation( _matrix41 );
this.halfWidth.value.set( light.width * 0.5, 0.0, 0.0 );
this.halfHeight.value.set( 0.0, light.height * 0.5, 0.0 );
this.halfWidth.value.applyMatrix4( _matrix42 );
this.halfHeight.value.applyMatrix4( _matrix42 );
}
setup( builder ) {
super.setup( builder );
// NOTE(review): assumes setLTC() was called beforehand — _ltcLib is null
// otherwise and the texture() calls below would fail; confirm renderer setup
// guarantees this.
let ltc_1, ltc_2;
if ( builder.isAvailable( 'float32Filterable' ) ) {
ltc_1 = texture( _ltcLib.LTC_FLOAT_1 );
ltc_2 = texture( _ltcLib.LTC_FLOAT_2 );
} else {
ltc_1 = texture( _ltcLib.LTC_HALF_1 );
ltc_2 = texture( _ltcLib.LTC_HALF_2 );
}
const { colorNode, light } = this;
const lightingModel = builder.context.lightingModel;
const lightPosition = lightViewPosition( light );
const reflectedLight = builder.context.reflectedLight;
lightingModel.directRectArea( {
lightColor: colorNode,
lightPosition,
halfWidth: this.halfWidth,
halfHeight: this.halfHeight,
reflectedLight,
ltc_1,
ltc_2
}, builder.stack, builder );
}
/**
 * Used to configure the internal BRDF approximation texture data.
 *
 * @param {RectAreaLightTexturesLib} ltc - The BRDF approximation texture data.
 */
static setLTC( ltc ) {
_ltcLib = ltc;
}
}
/**
 * Module for representing spot lights as nodes.
 *
 * @augments AnalyticLightNode
 */
class SpotLightNode extends AnalyticLightNode {
static get type() {
return 'SpotLightNode';
}
/**
 * Constructs a new spot light node.
 *
 * @param {SpotLight?} [light=null] - The spot light source.
 */
constructor( light = null ) {
super( light );
/**
 * Uniform node representing the cone cosine.
 *
 * @type {UniformNode<float>}
 */
this.coneCosNode = uniform( 0 ).setGroup( renderGroup );
/**
 * Uniform node representing the penumbra cosine.
 *
 * @type {UniformNode<float>}
 */
this.penumbraCosNode = uniform( 0 ).setGroup( renderGroup );
/**
 * Uniform node representing the cutoff distance.
 *
 * @type {UniformNode<float>}
 */
this.cutoffDistanceNode = uniform( 0 ).setGroup( renderGroup );
/**
 * Uniform node representing the decay exponent.
 *
 * @type {UniformNode<float>}
 */
this.decayExponentNode = uniform( 0 ).setGroup( renderGroup );
}
/**
 * Overwritten to update spot light specific uniforms.
 *
 * @param {NodeFrame} frame - A reference to the current node frame.
 */
update( frame ) {
super.update( frame );
const { light } = this;
this.coneCosNode.value = Math.cos( light.angle );
// Inner cone: the outer angle narrowed by the penumbra factor.
this.penumbraCosNode.value = Math.cos( light.angle * ( 1 - light.penumbra ) );
this.cutoffDistanceNode.value = light.distance;
this.decayExponentNode.value = light.decay;
}
/**
 * Computes the spot attenuation for the given angle.
 *
 * @param {Node<float>} angleCosine - The angle to compute the spot attenuation for.
 * @return {Node<float>} The spot attenuation.
 */
getSpotAttenuation( angleCosine ) {
const { coneCosNode, penumbraCosNode } = this;
// Smooth falloff between the outer cone and the inner (penumbra) cone.
return smoothstep( coneCosNode, penumbraCosNode, angleCosine );
}
setup( builder ) {
super.setup( builder );
const lightingModel = builder.context.lightingModel;
const { colorNode, cutoffDistanceNode, decayExponentNode, light } = this;
const lVector = lightViewPosition( light ).sub( positionView ); // @TODO: Add it into LightNode
const lightDirection = lVector.normalize();
// Angular attenuation: cosine of the angle between the fragment direction and the light axis.
const angleCos = lightDirection.dot( lightTargetDirection( light ) );
const spotAttenuation = this.getSpotAttenuation( angleCos );
const lightDistance = lVector.length();
const lightAttenuation = getDistanceAttenuation( {
lightDistance,
cutoffDistance: cutoffDistanceNode,
decayExponent: decayExponentNode
} );
let lightColor = colorNode.mul( spotAttenuation ).mul( lightAttenuation );
if ( light.map ) {
// Project the light's texture map; texels outside the [0,1]² projection window are ignored.
const spotLightCoord = lightProjectionUV( light );
const projectedTexture = texture( light.map, spotLightCoord.xy ).onRenderUpdate( () => light.map );
const inSpotLightMap = spotLightCoord.mul( 2. ).sub( 1. ).abs().lessThan( 1. ).all();
lightColor = inSpotLightMap.select( lightColor.mul( projectedTexture ), lightColor );
}
const reflectedLight = builder.context.reflectedLight;
lightingModel.direct( {
lightDirection,
lightColor,
reflectedLight
}, builder.stack, builder );
}
}
/**
 * An IES version of the default spot light node.
 *
 * @augments SpotLightNode
 */
class IESSpotLightNode extends SpotLightNode {

	static get type() {

		return 'IESSpotLightNode';

	}

	/**
	 * Overwrites the default implementation to compute an IES conform spot attenuation.
	 *
	 * @param {Node<float>} angleCosine - The angle to compute the spot attenuation for.
	 * @return {Node<float>} The spot attenuation.
	 */
	getSpotAttenuation( angleCosine ) {

		const { iesMap } = this.light;

		// Without a valid IES profile texture, fall back to the standard cone attenuation.
		if ( ! ( iesMap && iesMap.isTexture === true ) ) {

			return super.getSpotAttenuation( angleCosine );

		}

		// Sample the 1D IES profile by the normalized angle (acos mapped to [0,1]).
		const normalizedAngle = angleCosine.acos().mul( 1.0 / Math.PI );

		return texture( iesMap, vec2( normalizedAngle, 0 ), 0 ).r;

	}

}
/**
 * Module for representing ambient lights as nodes.
 *
 * @augments AnalyticLightNode
 */
class AmbientLightNode extends AnalyticLightNode {

	static get type() {

		return 'AmbientLightNode';

	}

	/**
	 * Constructs a new ambient light node.
	 *
	 * @param {AmbientLight?} [light=null] - The ambient light source.
	 */
	constructor( light = null ) {

		super( light );

	}

	setup( builder ) {

		// An ambient light contributes its color uniformly to the accumulated irradiance.
		builder.context.irradiance.addAssign( this.colorNode );

	}

}
/**
 * Module for representing hemisphere lights as nodes.
 *
 * @augments AnalyticLightNode
 */
class HemisphereLightNode extends AnalyticLightNode {

	static get type() {

		return 'HemisphereLightNode';

	}

	/**
	 * Constructs a new hemisphere light node.
	 *
	 * @param {HemisphereLight?} [light=null] - The hemisphere light source.
	 */
	constructor( light = null ) {

		super( light );

		/**
		 * Uniform node representing the light's position.
		 *
		 * @type {UniformNode<vec3>}
		 */
		this.lightPositionNode = lightPosition( light );

		/**
		 * A node representing the light's direction.
		 *
		 * @type {Node<vec3>}
		 */
		this.lightDirectionNode = this.lightPositionNode.normalize();

		/**
		 * Uniform node representing the light's ground color.
		 *
		 * @type {UniformNode<vec3>}
		 */
		this.groundColorNode = uniform( new Color() ).setGroup( renderGroup );

	}

	/**
	 * Overwritten to update hemisphere light specific uniforms.
	 *
	 * @param {NodeFrame} frame - A reference to the current node frame.
	 */
	update( frame ) {

		super.update( frame );

		const light = this.light;

		this.lightPositionNode.object3d = light;
		this.groundColorNode.value.copy( light.groundColor ).multiplyScalar( light.intensity );

	}

	setup( builder ) {

		const { colorNode, groundColorNode, lightDirectionNode } = this;

		// Blend ground and sky color by the half-Lambert weight (N·L remapped to [0,1]).
		const hemiDiffuseWeight = normalView.dot( lightDirectionNode ).mul( 0.5 ).add( 0.5 );

		builder.context.irradiance.addAssign( mix( groundColorNode, colorNode, hemiDiffuseWeight ) );

	}

}
/**
 * Module for representing light probes as nodes.
 *
 * @augments AnalyticLightNode
 */
class LightProbeNode extends AnalyticLightNode {

	static get type() {

		return 'LightProbeNode';

	}

	/**
	 * Constructs a new light probe node.
	 *
	 * @param {LightProbe?} [light=null] - The light probe.
	 */
	constructor( light = null ) {

		super( light );

		/**
		 * Light probe represented as a uniform of spherical harmonics
		 * (nine coefficient vectors).
		 *
		 * @type {UniformArrayNode}
		 */
		this.lightProbe = uniformArray( Array.from( { length: 9 }, () => new Vector3() ) );

	}

	/**
	 * Overwritten to update light probe specific uniforms.
	 *
	 * @param {NodeFrame} frame - A reference to the current node frame.
	 */
	update( frame ) {

		super.update( frame );

		const { sh, intensity } = this.light;

		// Copy the probe's SH coefficients, scaled by intensity, into the uniform array.
		for ( let i = 0; i < 9; i ++ ) {

			this.lightProbe.array[ i ].copy( sh.coefficients[ i ] ).multiplyScalar( intensity );

		}

	}

	setup( builder ) {

		builder.context.irradiance.addAssign( getShIrradianceAt( normalWorld, this.lightProbe ) );

	}

}
/**
 * Base class for node parsers. A derived parser must be implemented
 * for each supported native shader language.
 */
class NodeParser {
/**
 * The method parses the given native code and returns a node function.
 *
 * @abstract
 * @param {String} source - The native shader code.
 * @return {NodeFunction} A node function.
 */
parseFunction( /*source*/ ) {
// Abstract: concrete parsers (e.g. GLSLNodeParser) must override this method.
console.warn( 'Abstract function.' );
}
}
/**
 * Base class for node functions. A derived module must be implemented
 * for each supported native shader language. Similar to other `Node*` modules,
 * this class is only relevant during the building process and not used
 * in user-level code.
 */
class NodeFunction {

	/**
	 * Constructs a new node function.
	 *
	 * @param {String} type - The node type. This type is the return type of the node function.
	 * @param {Array<NodeFunctionInput>} inputs - The function's inputs.
	 * @param {String} [name=''] - The function's name.
	 * @param {String} [precision=''] - The precision qualifier.
	 */
	constructor( type, inputs, name = '', precision = '' ) {

		// Store the descriptor fields; property order matches the parameter order.
		Object.assign( this, { type, inputs, name, precision } );

	}

	/**
	 * This method returns the native code of the node function.
	 *
	 * @abstract
	 * @param {String} name - The function's name.
	 * @return {String} A shader code.
	 */
	getCode( /*name = this.name*/ ) {

		console.warn( 'Abstract function.' );

	}

}

NodeFunction.isNodeFunction = true;
// Matches a GLSL function declaration: optional precision, return type, optional name, parameter list.
const declarationRegexp$1 = /^\s*(highp|mediump|lowp)?\s*([a-z_0-9]+)\s*([a-z_0-9]+)?\s*\(([\s\S]*?)\)/i;
// Tokenizes identifiers/numbers inside a parameter list (global flag: consumed in an exec() loop).
const propertiesRegexp$1 = /[a-z_0-9]+/ig;
// Marker separating verbatim header code from the main function in a GLSL source.
const pragmaMain = '#pragma main';
// Parses a GLSL function source into its declaration parts (type, name,
// precision, inputs) plus the raw header, parameter and body code.
const parse$1 = ( source ) => {
source = source.trim();
// Code after '#pragma main' is treated as the main function; anything before
// it becomes header code emitted verbatim.
const pragmaMainIndex = source.indexOf( pragmaMain );
const mainCode = pragmaMainIndex !== - 1 ? source.slice( pragmaMainIndex + pragmaMain.length ) : source;
const declaration = mainCode.match( declarationRegexp$1 );
if ( declaration !== null && declaration.length === 5 ) {
// tokenizer
const inputsCode = declaration[ 4 ];
const propsMatches = [];
let nameMatch = null;
// The exec() loop runs the global regex to exhaustion, which also resets its lastIndex.
while ( ( nameMatch = propertiesRegexp$1.exec( inputsCode ) ) !== null ) {
propsMatches.push( nameMatch );
}
// parser
const inputs = [];
let i = 0;
while ( i < propsMatches.length ) {
// Optional 'const' qualifier.
const isConst = propsMatches[ i ][ 0 ] === 'const';
if ( isConst === true ) {
i ++;
}
// Optional parameter qualifier (in/out/inout).
let qualifier = propsMatches[ i ][ 0 ];
if ( qualifier === 'in' || qualifier === 'out' || qualifier === 'inout' ) {
i ++;
} else {
qualifier = '';
}
// Type token, optionally followed by a numeric array length token.
const type = propsMatches[ i ++ ][ 0 ];
let count = Number.parseInt( propsMatches[ i ][ 0 ] );
if ( Number.isNaN( count ) === false ) i ++;
else count = null;
const name = propsMatches[ i ++ ][ 0 ];
inputs.push( new NodeFunctionInput( type, name, count, qualifier, isConst ) );
}
//
const blockCode = mainCode.substring( declaration[ 0 ].length );
const name = declaration[ 3 ] !== undefined ? declaration[ 3 ] : '';
const type = declaration[ 2 ];
const precision = declaration[ 1 ] !== undefined ? declaration[ 1 ] : '';
const headerCode = pragmaMainIndex !== - 1 ? source.slice( 0, pragmaMainIndex ) : '';
return {
type,
inputs,
name,
precision,
inputsCode,
blockCode,
headerCode
};
} else {
throw new Error( 'FunctionNode: Function is not a GLSL code.' );
}
};
/**
 * This class represents a GLSL node function.
 *
 * @augments NodeFunction
 */
class GLSLNodeFunction extends NodeFunction {

	/**
	 * Constructs a new GLSL node function.
	 *
	 * @param {String} source - The GLSL source.
	 */
	constructor( source ) {

		const { type, inputs, name, precision, inputsCode, blockCode, headerCode } = parse$1( source );

		super( type, inputs, name, precision );

		// Raw code fragments preserved for getCode().
		Object.assign( this, { inputsCode, blockCode, headerCode } );

	}

	/**
	 * This method returns the GLSL code of the node function.
	 *
	 * @param {String} [name=this.name] - The function's name.
	 * @return {String} The shader code.
	 */
	getCode( name = this.name ) {

		const { blockCode } = this;

		// An empty body means the source only declared an interface function.
		if ( blockCode === '' ) return '';

		const { type, inputsCode, headerCode, precision } = this;

		let declaration = `${ type } ${ name } ( ${ inputsCode.trim() } )`;

		if ( precision !== '' ) declaration = `${ precision } ${ declaration }`;

		return headerCode + declaration + blockCode;

	}

}
/**
 * A GLSL node parser.
 *
 * @augments NodeParser
 */
class GLSLNodeParser extends NodeParser {
/**
 * The method parses the given GLSL code and returns a node function.
 *
 * @param {String} source - The GLSL code.
 * @return {GLSLNodeFunction} A node function.
 */
parseFunction( source ) {
// The heavy lifting (declaration/inputs parsing) happens in GLSLNodeFunction's constructor.
return new GLSLNodeFunction( source );
}
}
// Module-level cache keyed by objects; usage not visible in this chunk — presumably
// maps rendered objects to output nodes (TODO confirm against later code).
const _outputNodeMap = new WeakMap();
// Scratch arrays reused for ChainMap keys and cache-key hashing to avoid per-call allocations.
const _chainKeys$2 = [];
const _cacheKeyValues = [];
/**
* This renderer module manages node-related objects and is the
* primary interface between the renderer and the node system.
*
* @private
* @augments DataMap
*/
class Nodes extends DataMap {
/**
* Constructs a new nodes management component.
*
* @param {Renderer} renderer - The renderer.
* @param {Backend} backend - The renderer's backend.
*/
constructor( renderer, backend ) {
super();
/**
 * The renderer.
 *
 * @type {Renderer}
 */
this.renderer = renderer;
/**
 * The renderer's backend.
 *
 * @type {Backend}
 */
this.backend = backend;
/**
 * The node frame.
 *
 * @type {NodeFrame}
 */
this.nodeFrame = new NodeFrame();
/**
 * A cache for managing node builder states.
 *
 * @type {Map<Number,NodeBuilderState>}
 */
this.nodeBuilderCache = new Map();
/**
 * A cache for managing data cache key data.
 *
 * @type {ChainMap}
 */
this.callHashCache = new ChainMap();
/**
 * A cache for managing node uniforms group data.
 *
 * @type {ChainMap}
 */
this.groupsData = new ChainMap();
/**
 * A cache for managing node objects of
 * scene properties like fog or environments.
 *
 * @type {Object<String,WeakMap>}
 */
this.cacheLib = {};
}
/**
* Returns `true` if the given node uniforms group must be updated or not.
*
* @param {NodeUniformsGroup} nodeUniformsGroup - The node uniforms group.
* @return {Boolean} Whether the node uniforms group requires an update or not.
*/
updateGroup( nodeUniformsGroup ) {
const groupNode = nodeUniformsGroup.groupNode;
const name = groupNode.name;
// objectGroup is always updated
if ( name === objectGroup.name ) return true;
// renderGroup is updated once per render/compute call
if ( name === renderGroup.name ) {
const uniformsGroupData = this.get( nodeUniformsGroup );
const renderId = this.nodeFrame.renderId;
if ( uniformsGroupData.renderId !== renderId ) {
uniformsGroupData.renderId = renderId;
return true;
}
return false;
}
// frameGroup is updated once per frame
if ( name === frameGroup.name ) {
const uniformsGroupData = this.get( nodeUniformsGroup );
const frameId = this.nodeFrame.frameId;
if ( uniformsGroupData.frameId !== frameId ) {
uniformsGroupData.frameId = frameId;
return true;
}
return false;
}
// other groups are updated just when groupNode.needsUpdate is true
// (tracked via the group's version counter; the shared key array is reset
// after use so it does not pin references)
_chainKeys$2[ 0 ] = groupNode;
_chainKeys$2[ 1 ] = nodeUniformsGroup;
let groupData = this.groupsData.get( _chainKeys$2 );
if ( groupData === undefined ) this.groupsData.set( _chainKeys$2, groupData = {} );
_chainKeys$2.length = 0;
if ( groupData.version !== groupNode.version ) {
groupData.version = groupNode.version;
return true;
}
return false;
}
/**
* Returns the cache key for the given render object.
*
* @param {RenderObject} renderObject - The render object.
* @return {Number} The cache key.
*/
getForRenderCacheKey( renderObject ) {
// The initial cache key identifies the builder state in nodeBuilderCache
// (see getForRender() and delete()).
return renderObject.initialCacheKey;
}
/**
* Returns a node builder state for the given render object.
*
* @param {RenderObject} renderObject - The render object.
* @return {NodeBuilderState} The node builder state.
*/
getForRender( renderObject ) {
const renderObjectData = this.get( renderObject );
let nodeBuilderState = renderObjectData.nodeBuilderState;
if ( nodeBuilderState === undefined ) {
const { nodeBuilderCache } = this;
// Builder states are shared between render objects with the same cache key.
const cacheKey = this.getForRenderCacheKey( renderObject );
nodeBuilderState = nodeBuilderCache.get( cacheKey );
if ( nodeBuilderState === undefined ) {
// Cache miss: build the shaders for this object/material combination once.
const nodeBuilder = this.backend.createNodeBuilder( renderObject.object, this.renderer );
nodeBuilder.scene = renderObject.scene;
nodeBuilder.material = renderObject.material;
nodeBuilder.camera = renderObject.camera;
nodeBuilder.context.material = renderObject.material;
nodeBuilder.lightsNode = renderObject.lightsNode;
nodeBuilder.environmentNode = this.getEnvironmentNode( renderObject.scene );
nodeBuilder.fogNode = this.getFogNode( renderObject.scene );
nodeBuilder.clippingContext = renderObject.clippingContext;
nodeBuilder.build();
nodeBuilderState = this._createNodeBuilderState( nodeBuilder );
nodeBuilderCache.set( cacheKey, nodeBuilderState );
}
// Reference counting pairs with delete(), which evicts unused states.
nodeBuilderState.usedTimes ++;
renderObjectData.nodeBuilderState = nodeBuilderState;
}
return nodeBuilderState;
}
/**
* Deletes the given object from the internal data map
*
* @param {Any} object - The object to delete.
* @return {Object?} The deleted dictionary.
*/
delete( object ) {
if ( object.isRenderObject ) {
// Drop the shared builder-state cache entry once its last user is deleted.
// NOTE(review): assumes getForRender() ran for this object first —
// nodeBuilderState would be undefined otherwise; confirm callers guarantee this.
const nodeBuilderState = this.get( object ).nodeBuilderState;
nodeBuilderState.usedTimes --;
if ( nodeBuilderState.usedTimes === 0 ) {
this.nodeBuilderCache.delete( this.getForRenderCacheKey( object ) );
}
}
return super.delete( object );
}
/**
* Returns a node builder state for the given compute node.
*
* @param {Node} computeNode - The compute node.
* @return {NodeBuilderState} The node builder state.
*/
getForCompute( computeNode ) {
const computeData = this.get( computeNode );
let nodeBuilderState = computeData.nodeBuilderState;
if ( nodeBuilderState === undefined ) {
const nodeBuilder = this.backend.createNodeBuilder( computeNode, this.renderer );
nodeBuilder.build();
nodeBuilderState = this._createNodeBuilderState( nodeBuilder );
computeData.nodeBuilderState = nodeBuilderState;
}
return nodeBuilderState;
}
/**
* Creates a node builder state for the given node builder.
*
* @private
* @param {NodeBuilder} nodeBuilder - The node builder.
* @return {NodeBuilderState} The node builder state.
*/
_createNodeBuilderState( nodeBuilder ) {
// Snapshot everything the renderer needs from a finished build so the
// builder itself does not have to be retained.
return new NodeBuilderState(
nodeBuilder.vertexShader,
nodeBuilder.fragmentShader,
nodeBuilder.computeShader,
nodeBuilder.getAttributesArray(),
nodeBuilder.getBindings(),
nodeBuilder.updateNodes,
nodeBuilder.updateBeforeNodes,
nodeBuilder.updateAfterNodes,
nodeBuilder.observer,
nodeBuilder.transforms
);
}
/**
* Returns an environment node for the current configured
* scene environment.
*
* @param {Scene} scene - The scene.
* @return {Node} A node representing the current scene environment.
*/
getEnvironmentNode( scene ) {
this.updateEnvironment( scene );
let environmentNode = null;
if ( scene.environmentNode && scene.environmentNode.isNode ) {
environmentNode = scene.environmentNode;
} else {
const sceneData = this.get( scene );
if ( sceneData.environmentNode ) {
environmentNode = sceneData.environmentNode;
}
}
return environmentNode;
}
/**
* Returns a background node for the current configured
* scene background.
*
* @param {Scene} scene - The scene.
* @return {Node} A node representing the current scene background.
*/
getBackgroundNode( scene ) {
this.updateBackground( scene );
let backgroundNode = null;
if ( scene.backgroundNode && scene.backgroundNode.isNode ) {
backgroundNode = scene.backgroundNode;
} else {
const sceneData = this.get( scene );
if ( sceneData.backgroundNode ) {
backgroundNode = sceneData.backgroundNode;
}
}
return backgroundNode;
}
/**
* Returns a fog node for the current configured scene fog.
*
* @param {Scene} scene - The scene.
* @return {Node} A node representing the current scene fog.
*/
getFogNode( scene ) {
this.updateFog( scene );
return scene.fogNode || this.get( scene ).fogNode || null;
}
/**
* Returns a cache key for the given scene and lights node.
* This key is used by `RenderObject` as a part of the dynamic
* cache key (a key that must be checked every time the render
* object is drawn).
*
* @param {Scene} scene - The scene.
* @param {LightsNode} lightsNode - The lights node.
* @return {Number} The cache key.
*/
getCacheKey( scene, lightsNode ) {
// module-level scratch array used as the chain-map key to avoid
// per-call allocations; it is cleared again before returning
_chainKeys$2[ 0 ] = scene;
_chainKeys$2[ 1 ] = lightsNode;
const callId = this.renderer.info.calls;
const cacheKeyData = this.callHashCache.get( _chainKeys$2 ) || {};
// recompute the hash at most once per render call
if ( cacheKeyData.callId !== callId ) {
const environmentNode = this.getEnvironmentNode( scene );
const fogNode = this.getFogNode( scene );
if ( lightsNode ) _cacheKeyValues.push( lightsNode.getCacheKey( true ) );
if ( environmentNode ) _cacheKeyValues.push( environmentNode.getCacheKey() );
if ( fogNode ) _cacheKeyValues.push( fogNode.getCacheKey() );
_cacheKeyValues.push( this.renderer.shadowMap.enabled ? 1 : 0 );
cacheKeyData.callId = callId;
cacheKeyData.cacheKey = hashArray( _cacheKeyValues );
this.callHashCache.set( _chainKeys$2, cacheKeyData );
// reset the shared scratch array for the next invocation
_cacheKeyValues.length = 0;
}
_chainKeys$2.length = 0;
return cacheKeyData.cacheKey;
}
/**
* A boolean that indicates whether tone mapping should be enabled
* or not.
*
* @type {Boolean}
*/
get isToneMappingState() {
return this.renderer.getRenderTarget() ? false : true;
}
/**
* If a scene background is configured, this method makes sure to
* represent the background with a corresponding node-based implementation.
*
* @param {Scene} scene - The scene.
*/
updateBackground( scene ) {
const sceneData = this.get( scene );
const background = scene.background;
if ( background ) {
// a rebuild is forced when blurriness toggles between zero and non-zero,
// since that switches between the PMREM path and the plain cube-map path
const forceUpdate = ( scene.backgroundBlurriness === 0 && sceneData.backgroundBlurriness > 0 ) || ( scene.backgroundBlurriness > 0 && sceneData.backgroundBlurriness === 0 );
if ( sceneData.background !== background || forceUpdate ) {
const backgroundNode = this.getCacheNode( 'background', background, () => {
if ( background.isCubeTexture === true || ( background.mapping === EquirectangularReflectionMapping || background.mapping === EquirectangularRefractionMapping || background.mapping === CubeUVReflectionMapping ) ) {
if ( scene.backgroundBlurriness > 0 || background.mapping === CubeUVReflectionMapping ) {
// blurred backgrounds are sampled from a prefiltered environment map
return pmremTexture( background );
} else {
let envMap;
if ( background.isCubeTexture === true ) {
envMap = cubeTexture( background );
} else {
envMap = texture( background );
}
return cubeMapNode( envMap );
}
} else if ( background.isTexture === true ) {
// plain 2D texture backgrounds are sampled in screen space
return texture( background, screenUV.flipY() ).setUpdateMatrix( true );
} else if ( background.isColor !== true ) {
// color backgrounds need no node; anything else is unsupported
console.error( 'WebGPUNodes: Unsupported background configuration.', background );
}
}, forceUpdate );
sceneData.backgroundNode = backgroundNode;
sceneData.background = background;
sceneData.backgroundBlurriness = scene.backgroundBlurriness;
}
} else if ( sceneData.backgroundNode ) {
// the background was removed from the scene; drop the cached state
delete sceneData.backgroundNode;
delete sceneData.background;
}
}
/**
* This method is part of the caching of nodes which are used to represents the
* scene's background, fog or environment.
*
* @param {String} type - The type of object to cache.
* @param {Object} object - The object.
* @param {Function} callback - A callback that produces a node representation for the given object.
* @param {Boolean} [forceUpdate=false] - Whether an update should be enforced or not.
* @return {Node} The node representation.
*/
getCacheNode( type, object, callback, forceUpdate = false ) {
const nodeCache = this.cacheLib[ type ] || ( this.cacheLib[ type ] = new WeakMap() );
let node = nodeCache.get( object );
if ( node === undefined || forceUpdate ) {
node = callback();
nodeCache.set( object, node );
}
return node;
}
/**
* If a scene fog is configured, this method makes sure to
* represent the fog with a corresponding node-based implementation.
*
* @param {Scene} scene - The scene.
*/
updateFog( scene ) {
const sceneData = this.get( scene );
const sceneFog = scene.fog;
if ( sceneFog ) {
// only rebuild when the fog object itself changed; property changes
// are picked up through the reference nodes below
if ( sceneData.fog !== sceneFog ) {
const fogNode = this.getCacheNode( 'fog', sceneFog, () => {
if ( sceneFog.isFogExp2 ) {
// exponential-squared fog: driven by color and density
const color = reference( 'color', 'color', sceneFog ).setGroup( renderGroup );
const density = reference( 'density', 'float', sceneFog ).setGroup( renderGroup );
return fog( color, densityFogFactor( density ) );
} else if ( sceneFog.isFog ) {
// linear fog: driven by color and a near/far range
const color = reference( 'color', 'color', sceneFog ).setGroup( renderGroup );
const near = reference( 'near', 'float', sceneFog ).setGroup( renderGroup );
const far = reference( 'far', 'float', sceneFog ).setGroup( renderGroup );
return fog( color, rangeFogFactor( near, far ) );
} else {
console.error( 'THREE.Renderer: Unsupported fog configuration.', sceneFog );
}
} );
sceneData.fogNode = fogNode;
sceneData.fog = sceneFog;
}
} else {
// fog was removed from the scene; drop the cached state
delete sceneData.fogNode;
delete sceneData.fog;
}
}
/**
* If a scene environment is configured, this method makes sure to
* represent the environment with a corresponding node-based implementation.
*
* @param {Scene} scene - The scene.
*/
updateEnvironment( scene ) {
const sceneData = this.get( scene );
const environment = scene.environment;
if ( environment ) {
if ( sceneData.environment !== environment ) {
const environmentNode = this.getCacheNode( 'environment', environment, () => {
if ( environment.isCubeTexture === true ) {
return cubeTexture( environment );
} else if ( environment.isTexture === true ) {
return texture( environment );
} else {
console.error( 'Nodes: Unsupported environment configuration.', environment );
}
} );
sceneData.environmentNode = environmentNode;
sceneData.environment = environment;
}
} else if ( sceneData.environmentNode ) {
delete sceneData.environmentNode;
delete sceneData.environment;
}
}
getNodeFrame( renderer = this.renderer, scene = null, object = null, camera = null, material = null ) {
const nodeFrame = this.nodeFrame;
nodeFrame.renderer = renderer;
nodeFrame.scene = scene;
nodeFrame.object = object;
nodeFrame.camera = camera;
nodeFrame.material = material;
return nodeFrame;
}
getNodeFrameForRender( renderObject ) {
return this.getNodeFrame( renderObject.renderer, renderObject.scene, renderObject.object, renderObject.camera, renderObject.material );
}
/**
* Returns the current output cache key.
*
* @return {String} The output cache key.
*/
getOutputCacheKey() {
const renderer = this.renderer;
return renderer.toneMapping + ',' + renderer.currentColorSpace;
}
/**
* Checks if the output configuration (tone mapping and color space) for
* the given target has changed.
*
* @param {Texture} outputTarget - The output target.
* @return {Boolean} Whether the output configuration has changed or not.
*/
hasOutputChange( outputTarget ) {
const cacheKey = _outputNodeMap.get( outputTarget );
return cacheKey !== this.getOutputCacheKey();
}
/**
* Returns a node that represents the output configuration (tone mapping and
* color space) for the current target.
*
* @param {Texture} outputTarget - The output target.
* @return {Node} The output node.
*/
getOutputNode( outputTarget ) {
const renderer = this.renderer;
const cacheKey = this.getOutputCacheKey();
const output = texture( outputTarget, screenUV ).renderOutput( renderer.toneMapping, renderer.currentColorSpace );
_outputNodeMap.set( outputTarget, cacheKey );
return output;
}
/**
* Triggers the call of `updateBefore()` methods
* for all nodes of the given render object.
*
* @param {RenderObject} renderObject - The render object.
*/
updateBefore( renderObject ) {
const nodeBuilder = renderObject.getNodeBuilderState();
for ( const node of nodeBuilder.updateBeforeNodes ) {
// update frame state for each node
// NOTE(review): the frame is deliberately re-fetched on every iteration —
// updating a node presumably can mutate the shared frame state, so do not
// hoist this call out of the loop without confirming.
this.getNodeFrameForRender( renderObject ).updateBeforeNode( node );
}
}
/**
* Triggers the call of `updateAfter()` methods
* for all nodes of the given render object.
*
* @param {RenderObject} renderObject - The render object.
*/
updateAfter( renderObject ) {
const nodeBuilder = renderObject.getNodeBuilderState();
for ( const node of nodeBuilder.updateAfterNodes ) {
// update frame state for each node
// NOTE(review): re-fetched per iteration on purpose, mirroring updateBefore() —
// do not hoist out of the loop without confirming it is safe.
this.getNodeFrameForRender( renderObject ).updateAfterNode( node );
}
}
/**
* Triggers the call of `update()` methods
* for all nodes of the given compute node.
*
* @param {Node} computeNode - The compute node.
*/
updateForCompute( computeNode ) {
const nodeFrame = this.getNodeFrame();
const nodeBuilder = this.getForCompute( computeNode );
for ( const node of nodeBuilder.updateNodes ) {
nodeFrame.updateNode( node );
}
}
/**
* Triggers the call of `update()` methods
* for all nodes of the given compute node.
*
* @param {RenderObject} renderObject - The render object.
*/
updateForRender( renderObject ) {
const nodeFrame = this.getNodeFrameForRender( renderObject );
const nodeBuilder = renderObject.getNodeBuilderState();
for ( const node of nodeBuilder.updateNodes ) {
nodeFrame.updateNode( node );
}
}
/**
* Returns `true` if the given render object requires a refresh.
*
* @param {RenderObject} renderObject - The render object.
* @return {Boolean} Whether the given render object requires a refresh or not.
*/
needsRefresh( renderObject ) {
const nodeFrame = this.getNodeFrameForRender( renderObject );
const monitor = renderObject.getMonitor();
return monitor.needsRefresh( renderObject, nodeFrame );
}
/**
* Frees the internal resources.
*/
dispose() {
super.dispose();
// re-create the per-frame state and caches so the
// instance is back in a pristine, reusable state
this.nodeFrame = new NodeFrame();
this.nodeBuilderCache = new Map();
this.cacheLib = {};
}
}
const _plane = /*@__PURE__*/ new Plane(); // scratch plane reused by projectPlanes()
/**
* Represents the state that is used to perform clipping via clipping planes.
* There is a default clipping context for each render context. When the
* scene holds instances of `ClippingGroup`, there will be a context for each
* group.
*
* @private
*/
class ClippingContext {
/**
* Constructs a new clipping context.
*
* @param {ClippingContext?} [parentContext=null] - A reference to the parent clipping context.
*/
constructor( parentContext = null ) {
/**
* The clipping context's version.
*
* @type {Number}
* @readonly
*/
this.version = 0;
/**
* Whether the intersection of the clipping planes is used to clip objects, rather than their union.
*
* @type {Boolean?}
* @default null
*/
this.clipIntersection = null;
/**
* The clipping context's cache key.
*
* @type {String}
*/
this.cacheKey = '';
/**
* Whether the shadow pass is active or not.
*
* @type {Boolean}
* @default false
*/
this.shadowPass = false;
/**
* The view normal matrix.
*
* @type {Matrix3}
*/
this.viewNormalMatrix = new Matrix3();
/**
* Internal cache for maintaining clipping contexts.
*
* @type {WeakMap<ClippingGroup,ClippingContext>}
*/
this.clippingGroupContexts = new WeakMap();
/**
* The intersection planes.
*
* @type {Array<Vector4>}
*/
this.intersectionPlanes = [];
/**
* The union planes.
*
* @type {Array<Vector4>}
*/
this.unionPlanes = [];
/**
* The version of the clipping context's parent context.
*
* @type {Number?}
* @readonly
*/
this.parentVersion = null;
// child contexts share the parent's matrices and caches so they
// stay in sync with updateGlobal() on the root context
if ( parentContext !== null ) {
this.viewNormalMatrix = parentContext.viewNormalMatrix;
this.clippingGroupContexts = parentContext.clippingGroupContexts;
this.shadowPass = parentContext.shadowPass;
this.viewMatrix = parentContext.viewMatrix;
}
}
/**
* Projects the given source clipping planes and writes the result into the
* destination array.
*
* Each plane is transformed into view space and stored as a Vector4 with
* the negated normal in xyz and the plane constant in w.
*
* @param {Array<Plane>} source - The source clipping planes.
* @param {Array<Vector4>} destination - The destination.
* @param {Number} offset - The offset.
*/
projectPlanes( source, destination, offset ) {
const l = source.length;
for ( let i = 0; i < l; i ++ ) {
_plane.copy( source[ i ] ).applyMatrix4( this.viewMatrix, this.viewNormalMatrix );
const v = destination[ offset + i ];
const normal = _plane.normal;
// negated normal + constant is the representation the shader expects
v.x = - normal.x;
v.y = - normal.y;
v.z = - normal.z;
v.w = _plane.constant;
}
}
/**
* Updates the root clipping context of a scene.
*
* @param {Scene} scene - The scene.
* @param {Camera} camera - The camera that is used to render the scene.
*/
updateGlobal( scene, camera ) {
this.shadowPass = ( scene.overrideMaterial !== null && scene.overrideMaterial.isShadowPassMaterial );
this.viewMatrix = camera.matrixWorldInverse;
this.viewNormalMatrix.getNormalMatrix( this.viewMatrix );
}
/**
* Updates the clipping context.
*
* @param {ClippingContext} parentContext - The parent context.
* @param {ClippingGroup} clippingGroup - The clipping group this context belongs to.
*/
update( parentContext, clippingGroup ) {
let update = false;
// re-inherit the parent's planes whenever the parent changed
if ( parentContext.version !== this.parentVersion ) {
this.intersectionPlanes = Array.from( parentContext.intersectionPlanes );
this.unionPlanes = Array.from( parentContext.unionPlanes );
this.parentVersion = parentContext.version;
}
// when the mode flips, truncate the list of the other mode back to
// the parent's planes, discarding this group's previous contribution
if ( this.clipIntersection !== clippingGroup.clipIntersection ) {
this.clipIntersection = clippingGroup.clipIntersection;
if ( this.clipIntersection ) {
this.unionPlanes.length = parentContext.unionPlanes.length;
} else {
this.intersectionPlanes.length = parentContext.intersectionPlanes.length;
}
}
const srcClippingPlanes = clippingGroup.clippingPlanes;
const l = srcClippingPlanes.length;
let dstClippingPlanes;
let offset;
// this group's planes are appended after the parent's planes
if ( this.clipIntersection ) {
dstClippingPlanes = this.intersectionPlanes;
offset = parentContext.intersectionPlanes.length;
} else {
dstClippingPlanes = this.unionPlanes;
offset = parentContext.unionPlanes.length;
}
// version/cacheKey only change when the plane COUNT changes;
// plane values are re-projected every call regardless
if ( dstClippingPlanes.length !== offset + l ) {
dstClippingPlanes.length = offset + l;
for ( let i = 0; i < l; i ++ ) {
dstClippingPlanes[ offset + i ] = new Vector4();
}
update = true;
}
this.projectPlanes( srcClippingPlanes, dstClippingPlanes, offset );
if ( update ) {
this.version ++;
this.cacheKey = `${ this.intersectionPlanes.length }:${ this.unionPlanes.length }`;
}
}
/**
* Returns a clipping context for the given clipping group.
*
* @param {ClippingGroup} clippingGroup - The clipping group.
* @return {ClippingContext} The clipping context.
*/
getGroupContext( clippingGroup ) {
// during the shadow pass, groups that don't clip shadows are transparent
if ( this.shadowPass && ! clippingGroup.clipShadows ) return this;
let context = this.clippingGroupContexts.get( clippingGroup );
if ( context === undefined ) {
context = new ClippingContext( this );
this.clippingGroupContexts.set( clippingGroup, context );
}
context.update( this, clippingGroup );
return context;
}
/**
* The count of union clipping planes.
*
* @type {Number}
* @readonly
*/
get unionClippingCount() {
return this.unionPlanes.length;
}
}
/**
* Lightweight pairing of a bundle group with the camera it is rendered
* with; used to represent render bundles inside the renderer for
* further processing.
*
* @private
*/
class RenderBundle {
/**
* Constructs a new render bundle.
*
* @param {BundleGroup} bundleGroup - The bundle group.
* @param {Camera} camera - The camera the bundle group is rendered with.
*/
constructor( bundleGroup, camera ) {
Object.assign( this, { bundleGroup, camera } );
}
}
const _chainKeys$1 = []; // scratch array reused as the composite chain-map key
/**
* This renderer module manages render bundles.
*
* @private
*/
class RenderBundles {
/**
* Constructs a new render bundle management component.
*/
constructor() {
/**
* A chain map for maintaining the render bundles.
*
* @type {ChainMap}
*/
this.bundles = new ChainMap();
}
/**
* Returns the render bundle for the given bundle group and camera,
* creating it on first request.
*
* @param {BundleGroup} bundleGroup - The bundle group.
* @param {Camera} camera - The camera the bundle group is rendered with.
* @return {RenderBundle} The render bundle.
*/
get( bundleGroup, camera ) {
_chainKeys$1[ 0 ] = bundleGroup;
_chainKeys$1[ 1 ] = camera;
let renderBundle = this.bundles.get( _chainKeys$1 );
if ( renderBundle === undefined ) {
renderBundle = new RenderBundle( bundleGroup, camera );
this.bundles.set( _chainKeys$1, renderBundle );
}
// always clear the scratch key before returning
_chainKeys$1.length = 0;
return renderBundle;
}
/**
* Frees all internal resources.
*/
dispose() {
this.bundles = new ChainMap();
}
}
/**
* The purpose of a node library is to assign node implementations
* to existing library features. In `WebGPURenderer` lights, materials
* which are not based on `NodeMaterial` as well as tone mapping techniques
* are implemented with node-based modules.
*
* @private
*/
class NodeLibrary {
/**
* Constructs a new node library.
*/
constructor() {
/**
* Maps light class definitions to light node class definitions.
*
* @type {WeakMap<Light.constructor,AnalyticLightNode.constructor>}
*/
this.lightNodes = new WeakMap();
/**
* Maps material type names to node material class definitions.
*
* @type {Map<String,NodeMaterial.constructor>}
*/
this.materialNodes = new Map();
/**
* Maps tone mapping techniques (constants) to tone mapping node functions.
*
* @type {Map<Number,Function>}
*/
this.toneMappingNodes = new Map();
}
/**
* Returns a matching node material instance for the given material object.
*
* All enumerable properties of the given material are copied onto the node
* material so the current material configuration carries over.
*
* @param {Material} material - A material.
* @return {NodeMaterial} The corresponding node material. Returns `null` if no match was found.
*/
fromMaterial( material ) {
if ( material.isNodeMaterial ) return material;
const NodeMaterialClass = this.getMaterialNodeClass( material.type );
if ( NodeMaterialClass === null ) return null;
const nodeMaterial = new NodeMaterialClass();
// for...in on purpose: copies every enumerable property, including inherited ones
for ( const property in material ) {
nodeMaterial[ property ] = material[ property ];
}
return nodeMaterial;
}
/**
* Adds a tone mapping node function for a tone mapping technique (constant).
*
* @param {Function} toneMappingNode - The tone mapping node function.
* @param {Number} toneMapping - The tone mapping.
*/
addToneMapping( toneMappingNode, toneMapping ) {
this.addType( toneMappingNode, toneMapping, this.toneMappingNodes );
}
/**
* Returns a tone mapping node function for a tone mapping technique (constant).
*
* @param {Number} toneMapping - The tone mapping.
* @return {Function?} The tone mapping node function. Returns `null` if no node function is found.
*/
getToneMappingFunction( toneMapping ) {
return this.toneMappingNodes.get( toneMapping ) || null;
}
/**
* Returns a node material class definition for a material type.
*
* @param {String} materialType - The material type.
* @return {NodeMaterial.constructor?} The node material class definition. Returns `null` if no node material is found.
*/
getMaterialNodeClass( materialType ) {
return this.materialNodes.get( materialType ) || null;
}
/**
* Adds a node material class definition for a given material type.
*
* @param {NodeMaterial.constructor} materialNodeClass - The node material class definition.
* @param {String} materialClassType - The material type.
*/
addMaterial( materialNodeClass, materialClassType ) {
this.addType( materialNodeClass, materialClassType, this.materialNodes );
}
/**
* Returns a light node class definition for a light class definition.
*
* @param {Light.constructor} light - The light class definition.
* @return {AnalyticLightNode.constructor?} The light node class definition. Returns `null` if no light node is found.
*/
getLightNodeClass( light ) {
return this.lightNodes.get( light ) || null;
}
/**
* Adds a light node class definition for a given light class definition.
*
* @param {AnalyticLightNode.constructor} lightNodeClass - The light node class definition.
* @param {Light.constructor} lightClass - The light class definition.
*/
addLight( lightNodeClass, lightClass ) {
this.addClass( lightNodeClass, lightClass, this.lightNodes );
}
/**
* Adds a node class definition for the given type to the provided type library.
*
* Duplicate registrations are ignored with a warning; invalid arguments throw.
*
* @param {Any} nodeClass - The node class definition.
* @param {Number|String} type - The object type.
* @param {Map} library - The type library.
*/
addType( nodeClass, type, library ) {
if ( library.has( type ) ) {
console.warn( `Redefinition of node ${ type }` );
return;
}
if ( typeof nodeClass !== 'function' ) throw new Error( `Node class ${ nodeClass.name } is not a class.` );
if ( typeof type === 'function' || typeof type === 'object' ) throw new Error( `Base class ${ type } is not a class.` );
library.set( type, nodeClass );
}
/**
* Adds a node class definition for the given class definition to the provided type library.
*
* Duplicate registrations are ignored with a warning; invalid arguments throw.
*
* @param {Any} nodeClass - The node class definition.
* @param {Any} baseClass - The class definition.
* @param {WeakMap} library - The type library.
*/
addClass( nodeClass, baseClass, library ) {
if ( library.has( baseClass ) ) {
console.warn( `Redefinition of node ${ baseClass.name }` );
return;
}
if ( typeof nodeClass !== 'function' ) throw new Error( `Node class ${ nodeClass.name } is not a class.` );
if ( typeof baseClass !== 'function' ) throw new Error( `Base class ${ baseClass.name } is not a class.` );
library.set( baseClass, nodeClass );
}
}
const _defaultLights = /*@__PURE__*/ new LightsNode();
const _chainKeys = []; // scratch array reused as the composite chain-map key
/**
* This renderer module manages the lights nodes which are unique
* per scene and camera combination.
*
* The lights node itself is later configured in the render list
* with the actual lights from the scene.
*
* @private
* @augments ChainMap
*/
class Lighting extends ChainMap {
/**
* Constructs a lighting management component.
*/
constructor() {
super();
}
/**
* Creates a new lights node for the given array of lights.
*
* @param {Array<Light>} lights - The lights the node should manage.
* @return {LightsNode} The lights node.
*/
createNode( lights = [] ) {
return new LightsNode().setLights( lights );
}
/**
* Returns the lights node for the given scene and camera,
* creating it on first request.
*
* @param {Scene} scene - The scene.
* @param {Camera} camera - The camera.
* @return {LightsNode} The lights node.
*/
getNode( scene, camera ) {
// post-processing quad meshes don't use scene lights
if ( scene.isQuadMesh ) return _defaultLights;
_chainKeys[ 0 ] = scene;
_chainKeys[ 1 ] = camera;
let lightsNode = this.get( _chainKeys );
if ( lightsNode === undefined ) {
lightsNode = this.createNode();
this.set( _chainKeys, lightsNode );
}
// always clear the scratch key before returning
_chainKeys.length = 0;
return lightsNode;
}
}
/**
* A special type of render target that is used when rendering
* with the WebXR Device API.
*
* @private
* @augments RenderTarget
*/
class XRRenderTarget extends RenderTarget {
/**
* Constructs a new XR render target.
*
* @param {Number} [width=1] - The width of the render target.
* @param {Number} [height=1] - The height of the render target.
* @param {Object} [options={}] - The configuration options.
*/
constructor( width = 1, height = 1, options = {} ) {
super( width, height, options );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isXRRenderTarget = true;
/**
* Whether the attachments of the render target are defined by
* external textures. Set to `true` when the WebXR Layers API
* is in use.
*
* @type {Boolean}
* @default false
*/
this.hasExternalTextures = false;
/**
* Whether a depth buffer should automatically be allocated
* for this XR render target or not.
*
* Allocating a depth buffer is the default behavior of XR render
* targets. However, when using the WebXR Layers API, this flag
* must be set to `false` when the `ignoreDepthValues` property of
* the projection layers evaluates to `false`.
*
* Reference: {@link https://www.w3.org/TR/webxrlayers-1/#dom-xrprojectionlayer-ignoredepthvalues}.
*
* @type {Boolean}
* @default true
*/
this.autoAllocateDepthBuffer = true;
}
copy( source ) {
super.copy( source );
// carry over the XR-specific flags the base class does not know about
this.hasExternalTextures = source.hasExternalTextures;
this.autoAllocateDepthBuffer = source.autoAllocateDepthBuffer;
return this;
}
}
// scratch vectors for the left/right eye camera positions —
// NOTE(review): usage is outside this view; presumably used when
// deriving the combined XR camera. Confirm against the rest of XRManager.
const _cameraLPos = /*@__PURE__*/ new Vector3();
const _cameraRPos = /*@__PURE__*/ new Vector3();
/**
* The XR manager is built on top of the WebXR Device API to
* manage XR sessions with `WebGPURenderer`.
*
* XR is currently only supported with a WebGL 2 backend.
*
* @augments EventDispatcher
*/
class XRManager extends EventDispatcher {
/**
* Constructs a new XR manager.
*
* @param {Renderer} renderer - The renderer.
*/
constructor( renderer ) {
super();
/**
* This flag globally enables XR rendering.
*
* @type {Boolean}
* @default false
*/
this.enabled = false;
/**
* Whether the XR device is currently presenting or not.
*
* @type {Boolean}
* @default false
* @readonly
*/
this.isPresenting = false;
/**
* Whether the XR camera should automatically be updated or not.
*
* @type {Boolean}
* @default true
*/
this.cameraAutoUpdate = true;
/**
* The renderer.
*
* @private
* @type {Renderer}
*/
this._renderer = renderer;
// camera
/**
* Represents the camera for the left eye.
*
* @private
* @type {PerspectiveCamera}
*/
this._cameraL = new PerspectiveCamera();
this._cameraL.viewport = new Vector4();
/**
* Represents the camera for the right eye.
*
* @private
* @type {PerspectiveCamera}
*/
this._cameraR = new PerspectiveCamera();
this._cameraR.viewport = new Vector4();
/**
* A list of cameras used for rendering the XR views.
*
* @private
* @type {Array<Camera>}
*/
this._cameras = [ this._cameraL, this._cameraR ];
/**
* The main XR camera.
*
* @private
* @type {ArrayCamera}
*/
this._cameraXR = new ArrayCamera();
/**
* The current near value of the XR camera.
*
* @private
* @type {Number?}
* @default null
*/
this._currentDepthNear = null;
/**
* The current far value of the XR camera.
*
* @private
* @type {Number?}
* @default null
*/
this._currentDepthFar = null;
/**
* A list of WebXR controllers requested by the application.
*
* @private
* @type {Array<WebXRController>}
*/
this._controllers = [];
/**
* A list of XR input sources. Each input source belongs to
* an instance of WebXRController.
*
* @private
* @type {Array<XRInputSource?>}
*/
this._controllerInputSources = [];
/**
* The current render target of the renderer.
*
* @private
* @type {RenderTarget?}
* @default null
*/
this._currentRenderTarget = null;
/**
* The XR render target that represents the rendering destination
* during an active XR session.
*
* @private
* @type {RenderTarget?}
* @default null
*/
this._xrRenderTarget = null;
/**
* The current animation context.
*
* @private
* @type {Window?}
* @default null
*/
this._currentAnimationContext = null;
/**
* The current animation loop.
*
* @private
* @type {Function?}
* @default null
*/
this._currentAnimationLoop = null;
/**
* The current pixel ratio.
*
* @private
* @type {Number?}
* @default null
*/
this._currentPixelRatio = null;
/**
* The current size of the renderer's canvas
* in logical pixel unit.
*
* @private
* @type {Vector2}
*/
this._currentSize = new Vector2();
/**
* The default event listener for handling events inside a XR session.
*
* @private
* @type {Function}
*/
this._onSessionEvent = onSessionEvent.bind( this );
/**
* The event listener for handling the end of a XR session.
*
* @private
* @type {Function}
*/
this._onSessionEnd = onSessionEnd.bind( this );
/**
* The event listener for handling the `inputsourceschange` event.
*
* @private
* @type {Function}
*/
this._onInputSourcesChange = onInputSourcesChange.bind( this );
/**
* The animation loop which is used as a replacement for the default
* animation loop of the application. It is only used when a XR session
* is active.
*
* @private
* @type {Function}
*/
this._onAnimationFrame = onAnimationFrame.bind( this );
/**
* The current XR reference space.
*
* @private
* @type {XRReferenceSpace?}
* @default null
*/
this._referenceSpace = null;
/**
* The current XR reference space type.
*
* @private
* @type {String}
* @default 'local-floor'
*/
this._referenceSpaceType = 'local-floor';
/**
* A custom reference space defined by the application.
*
* @private
* @type {XRReferenceSpace?}
* @default null
*/
this._customReferenceSpace = null;
/**
* The framebuffer scale factor.
*
* @private
* @type {Number}
* @default 1
*/
this._framebufferScaleFactor = 1;
/**
* The foveation factor.
*
* @private
* @type {Number}
* @default 1
*/
this._foveation = 1.0;
/**
* A reference to the current XR session.
*
* @private
* @type {XRSession?}
* @default null
*/
this._session = null;
/**
* A reference to the current XR base layer.
*
* @private
* @type {XRWebGLLayer?}
* @default null
*/
this._glBaseLayer = null;
/**
* A reference to the current XR binding.
*
* @private
* @type {XRWebGLBinding?}
* @default null
*/
this._glBinding = null;
/**
* A reference to the current XR projection layer.
*
* @private
* @type {XRProjectionLayer?}
* @default null
*/
this._glProjLayer = null;
/**
* A reference to the current XR frame.
*
* @private
* @type {XRFrame?}
* @default null
*/
this._xrFrame = null;
/**
* Whether to use the WebXR Layers API or not.
*
* @private
* @type {Boolean}
* @readonly
*/
this._useLayers = ( typeof XRWebGLBinding !== 'undefined' && 'createProjectionLayer' in XRWebGLBinding.prototype ); // eslint-disable-line compat/compat
}
/**
* Returns an instance of `THREE.Group` that represents the transformation
* of a XR controller in target ray space. The requested controller is defined
* by the given index.
*
* @param {Number} index - The index of the XR controller.
* @return {Group} A group that represents the controller's transformation.
*/
getController( index ) {
const controller = this._getController( index );
return controller.getTargetRaySpace();
}
/**
* Returns an instance of `THREE.Group` that represents the transformation
* of a XR controller in grip space. The requested controller is defined
* by the given index.
*
* @param {Number} index - The index of the XR controller.
* @return {Group} A group that represents the controller's transformation.
*/
getControllerGrip( index ) {
const controller = this._getController( index );
return controller.getGripSpace();
}
/**
* Returns an instance of `THREE.Group` that represents the transformation
* of a XR controller in hand space. The requested controller is defined
* by the given index.
*
* @param {Number} index - The index of the XR controller.
* @return {Group} A group that represents the controller's transformation.
*/
getHand( index ) {
const controller = this._getController( index );
return controller.getHandSpace();
}
/**
* Returns the foveation value.
*
* @return {Number|undefined} The foveation value. Returns `undefined` if no base or projection layer is defined.
*/
getFoveation() {
if ( this._glProjLayer === null && this._glBaseLayer === null ) {
return undefined;
}
return this._foveation;
}
/**
* Sets the foveation value.
*
* @param {Number} foveation - A number in the range `[0,1]` where `0` means no foveation (full resolution)
* and `1` means maximum foveation (the edges render at lower resolution).
*/
setFoveation( foveation ) {
this._foveation = foveation;
if ( this._glProjLayer !== null ) {
this._glProjLayer.fixedFoveation = foveation;
}
if ( this._glBaseLayer !== null && this._glBaseLayer.fixedFoveation !== undefined ) {
this._glBaseLayer.fixedFoveation = foveation;
}
}
/**
* Returns the framebuffer scale factor.
*
* @return {Number} The framebuffer scale factor.
*/
getFramebufferScaleFactor() {
// plain accessor; the factor is applied when the XR layers are created in setSession()
return this._framebufferScaleFactor;
}
/**
* Sets the framebuffer scale factor.
*
* This method can not be used during a XR session.
*
* @param {Number} factor - The framebuffer scale factor.
*/
setFramebufferScaleFactor( factor ) {
this._framebufferScaleFactor = factor;
if ( this.isPresenting === true ) {
console.warn( 'THREE.XRManager: Cannot change framebuffer scale while presenting.' );
}
}
/**
* Returns the reference space type.
*
* @return {String} The reference space type.
*/
getReferenceSpaceType() {
// plain accessor; defaults to 'local-floor'
return this._referenceSpaceType;
}
/**
* Sets the reference space type.
*
* This method can not be used during a XR session.
*
* @param {String} type - The reference space type.
*/
setReferenceSpaceType( type ) {
this._referenceSpaceType = type;
if ( this.isPresenting === true ) {
console.warn( 'THREE.XRManager: Cannot change reference space type while presenting.' );
}
}
/**
* Returns the XR reference space.
*
* @return {XRReferenceSpace} The XR reference space.
*/
getReferenceSpace() {
// a custom space set by the application wins over the session-derived one
return this._customReferenceSpace || this._referenceSpace;
}
/**
* Sets a custom XR reference space.
*
* @param {XRReferenceSpace} space - The XR reference space.
*/
setReferenceSpace( space ) {
// pass a falsy value to clear the override and fall back to the default space
this._customReferenceSpace = space;
}
/**
* Returns the XR camera.
*
* @return {ArrayCamera} The XR camera.
*/
getCamera() {
// the shared ArrayCamera instance created in the constructor
return this._cameraXR;
}
/**
* Returns the environment blend mode from the current XR session.
*
* @return {('opaque'|'additive'|'alpha-blend')|undefined} The environment blend mode. Returns `undefined` when used outside of an XR session.
*/
getEnvironmentBlendMode() {
if ( this._session !== null ) {
return this._session.environmentBlendMode;
}
// NOTE(review): falls through to `undefined` (not `null`) outside a session
}
/**
* Returns the current XR frame.
*
* @return {XRFrame?} The XR frame. Returns `null` when used outside a XR session.
*/
getFrame() {
// plain accessor; `null` outside the XR animation loop
return this._xrFrame;
}
/**
* Returns the current XR session.
*
* @return {XRSession?} The XR session. Returns `null` when used outside a XR session.
*/
getSession() {
// plain accessor; `null` when no session is active
return this._session;
}
/**
* After a XR session has been requested usually with one of the `*Button` modules, it
* is injected into the renderer with this method. This method triggers the start of
* the actual XR rendering.
*
* Only the WebGL backend is supported right now; calling this with a WebGPU
* backend throws. The current renderer state (render target, pixel ratio,
* size, animation loop) is saved so `onSessionEnd()` can restore it.
*
* @async
* @param {XRSession} session - The XR session to set.
* @return {Promise} A Promise that resolves when the session has been set.
*/
async setSession( session ) {
const renderer = this._renderer;
const backend = renderer.backend;
const gl = renderer.getContext();
this._session = session;
if ( session !== null ) {
// XR rendering is implemented for the WebGL backend only
if ( backend.isWebGPUBackend === true ) throw new Error( 'THREE.XRManager: XR is currently not supported with a WebGPU backend. Use WebGL by passing "{ forceWebGL: true }" to the constructor of the renderer.' );
// remember the active render target so it can be restored when the session ends
this._currentRenderTarget = renderer.getRenderTarget();
// forward session input events to the matching WebXRController instances
session.addEventListener( 'select', this._onSessionEvent );
session.addEventListener( 'selectstart', this._onSessionEvent );
session.addEventListener( 'selectend', this._onSessionEvent );
session.addEventListener( 'squeeze', this._onSessionEvent );
session.addEventListener( 'squeezestart', this._onSessionEvent );
session.addEventListener( 'squeezeend', this._onSessionEvent );
session.addEventListener( 'end', this._onSessionEnd );
session.addEventListener( 'inputsourceschange', this._onInputSourcesChange );
// the GL context must be marked XR compatible before XR layers can be created
await backend.makeXRCompatible();
// save pixel ratio, size and animation state for restoration in onSessionEnd()
this._currentPixelRatio = renderer.getPixelRatio();
renderer.getSize( this._currentSize );
this._currentAnimationContext = renderer._animation.getContext();
this._currentAnimationLoop = renderer._animation.getAnimationLoop();
// pause the regular animation loop; rendering is driven by the XR session below
renderer._animation.stop();
//
if ( this._useLayers === true ) {
// default path using XRWebGLBinding/XRProjectionLayer
// pick a GL depth buffer layout matching the renderer's depth/stencil configuration
let depthFormat = null;
let depthType = null;
let glDepthFormat = null;
if ( renderer.depth ) {
glDepthFormat = renderer.stencil ? gl.DEPTH24_STENCIL8 : gl.DEPTH_COMPONENT24;
depthFormat = renderer.stencil ? DepthStencilFormat : DepthFormat;
depthType = renderer.stencil ? UnsignedInt248Type : UnsignedIntType;
}
const projectionlayerInit = {
colorFormat: gl.RGBA8,
depthFormat: glDepthFormat,
scaleFactor: this._framebufferScaleFactor
};
const glBinding = new XRWebGLBinding( session, gl );
const glProjLayer = glBinding.createProjectionLayer( projectionlayerInit );
this._glBinding = glBinding;
this._glProjLayer = glProjLayer;
session.updateRenderState( { layers: [ glProjLayer ] } );
// render at the layer's native resolution; scaling is handled by the compositor
renderer.setPixelRatio( 1 );
renderer.setSize( glProjLayer.textureWidth, glProjLayer.textureHeight, false );
this._xrRenderTarget = new XRRenderTarget(
glProjLayer.textureWidth,
glProjLayer.textureHeight,
{
format: RGBAFormat,
type: UnsignedByteType,
colorSpace: renderer.outputColorSpace,
depthTexture: new DepthTexture( glProjLayer.textureWidth, glProjLayer.textureHeight, depthType, undefined, undefined, undefined, undefined, undefined, undefined, depthFormat ),
stencilBuffer: renderer.stencil,
samples: renderer.samples
} );
// color/depth textures are supplied per-frame by the XR compositor (see onAnimationFrame())
this._xrRenderTarget.hasExternalTextures = true;
} else {
// fallback to XRWebGLLayer
const layerInit = {
antialias: renderer.samples > 0,
alpha: true,
depth: renderer.depth,
stencil: renderer.stencil,
framebufferScaleFactor: this.getFramebufferScaleFactor()
};
const glBaseLayer = new XRWebGLLayer( session, gl, layerInit );
this._glBaseLayer = glBaseLayer;
session.updateRenderState( { baseLayer: glBaseLayer } );
renderer.setPixelRatio( 1 );
renderer.setSize( glBaseLayer.framebufferWidth, glBaseLayer.framebufferHeight, false );
this._xrRenderTarget = new XRRenderTarget(
glBaseLayer.framebufferWidth,
glBaseLayer.framebufferHeight,
{
format: RGBAFormat,
type: UnsignedByteType,
colorSpace: renderer.outputColorSpace,
stencilBuffer: renderer.stencil
}
);
}
//
this.setFoveation( this.getFoveation() );
// the reference space must be resolved before XR rendering starts
this._referenceSpace = await session.requestReferenceSpace( this.getReferenceSpaceType() );
// drive the animation loop from the XR session from now on
renderer._animation.setAnimationLoop( this._onAnimationFrame );
renderer._animation.setContext( session );
renderer._animation.start();
this.isPresenting = true;
this.dispatchEvent( { type: 'sessionstart' } );
}
}
/**
* This method is called by the renderer per frame and updates the XR camera
* and its sub cameras based on the given camera. The given camera is the "user"
* camera created on application level and used for non-XR rendering.
*
* @param {PerspectiveCamera} camera - The camera.
*/
updateCamera( camera ) {
const session = this._session;
if ( session === null ) return;
// propagate the user camera's near/far planes to all XR cameras
const depthNear = camera.near;
const depthFar = camera.far;
const cameraXR = this._cameraXR;
const cameraL = this._cameraL;
const cameraR = this._cameraR;
cameraXR.near = cameraR.near = cameraL.near = depthNear;
cameraXR.far = cameraR.far = cameraL.far = depthFar;
if ( this._currentDepthNear !== cameraXR.near || this._currentDepthFar !== cameraXR.far ) {
// Note that the new renderState won't apply until the next frame. See #18320
session.updateRenderState( {
depthNear: cameraXR.near,
depthFar: cameraXR.far
} );
this._currentDepthNear = cameraXR.near;
this._currentDepthFar = cameraXR.far;
}
// reserve layer 1 for the left and layer 2 for the right eye camera,
// on top of whatever layers the user camera has enabled
cameraL.layers.mask = camera.layers.mask | 0b010;
cameraR.layers.mask = camera.layers.mask | 0b100;
cameraXR.layers.mask = cameraL.layers.mask | cameraR.layers.mask;
const parent = camera.parent;
const cameras = cameraXR.cameras;
// refresh world matrices of the XR camera and its per-view sub cameras
// (calls the module-level updateCamera() helper, not this method)
updateCamera( cameraXR, parent );
for ( let i = 0; i < cameras.length; i ++ ) {
updateCamera( cameras[ i ], parent );
}
// update projection matrix for proper view frustum culling
if ( cameras.length === 2 ) {
setProjectionFromUnion( cameraXR, cameraL, cameraR );
} else {
// assume single camera setup (AR)
cameraXR.projectionMatrix.copy( cameraL.projectionMatrix );
}
// update user camera and its children
updateUserCamera( camera, cameraXR, parent );
}
/**
* Returns a WebXR controller for the given controller index.
*
* @private
* @param {Number} index - The controller index.
* @return {WebXRController} The XR controller.
*/
_getController( index ) {
let controller = this._controllers[ index ];
if ( controller === undefined ) {
controller = new WebXRController();
this._controllers[ index ] = controller;
}
return controller;
}
}
/**
* Assumes 2 cameras that are parallel and share an X-axis, and that
* the cameras' projection and world matrices have already been set.
* And that near and far planes are identical for both cameras.
* Visualization of this technique: https://computergraphics.stackexchange.com/a/4765
*
* @param {ArrayCamera} camera - The camera to update.
* @param {PerspectiveCamera} cameraL - The left camera.
* @param {PerspectiveCamera} cameraR - The right camera.
*/
function setProjectionFromUnion( camera, cameraL, cameraR ) {
_cameraLPos.setFromMatrixPosition( cameraL.matrixWorld );
_cameraRPos.setFromMatrixPosition( cameraR.matrixWorld );
// interpupillary distance: world-space distance between the two eye cameras
const ipd = _cameraLPos.distanceTo( _cameraRPos );
const projL = cameraL.projectionMatrix.elements;
const projR = cameraR.projectionMatrix.elements;
// VR systems will have identical far and near planes, and
// most likely identical top and bottom frustum extents.
// Use the left camera for these values.
// near/far and the frustum edge slopes are recovered from the
// column-major perspective projection matrix elements.
const near = projL[ 14 ] / ( projL[ 10 ] - 1 );
const far = projL[ 14 ] / ( projL[ 10 ] + 1 );
const topFov = ( projL[ 9 ] + 1 ) / projL[ 5 ];
const bottomFov = ( projL[ 9 ] - 1 ) / projL[ 5 ];
// horizontally, take the outer edge of each eye's frustum
const leftFov = ( projL[ 8 ] - 1 ) / projL[ 0 ];
const rightFov = ( projR[ 8 ] + 1 ) / projR[ 0 ];
const left = near * leftFov;
const right = near * rightFov;
// Calculate the new camera's position offset from the
// left camera. xOffset should be roughly half `ipd`.
const zOffset = ipd / ( - leftFov + rightFov );
const xOffset = zOffset * - leftFov;
// TODO: Better way to apply this offset?
cameraL.matrixWorld.decompose( camera.position, camera.quaternion, camera.scale );
camera.translateX( xOffset );
camera.translateZ( zOffset );
camera.matrixWorld.compose( camera.position, camera.quaternion, camera.scale );
camera.matrixWorldInverse.copy( camera.matrixWorld ).invert();
// Check if the projection uses an infinite far plane.
if ( projL[ 10 ] === - 1.0 ) {
// Use the projection matrix from the left eye.
// The camera offset is sufficient to include the view volumes
// of both eyes (assuming symmetric projections).
camera.projectionMatrix.copy( cameraL.projectionMatrix );
camera.projectionMatrixInverse.copy( cameraL.projectionMatrixInverse );
} else {
// Find the union of the frustum values of the cameras and scale
// the values so that the near plane's position does not change in world space,
// although must now be relative to the new union camera.
const near2 = near + zOffset;
const far2 = far + zOffset;
const left2 = left - xOffset;
const right2 = right + ( ipd - xOffset );
const top2 = topFov * far / far2 * near2;
const bottom2 = bottomFov * far / far2 * near2;
camera.projectionMatrix.makePerspective( left2, right2, top2, bottom2, near2, far2 );
camera.projectionMatrixInverse.copy( camera.projectionMatrix ).invert();
}
}
/**
* Updates the world matrices for the given camera based on the parent 3D object.
*
* @inner
* @param {Camera} camera - The camera to update.
* @param {Object3D} parent - The parent 3D object.
*/
function updateCamera( camera, parent ) {
if ( parent !== null ) {
// compose the camera's local matrix with the parent's world matrix
camera.matrixWorld.multiplyMatrices( parent.matrixWorld, camera.matrix );
} else {
camera.matrixWorld.copy( camera.matrix );
}
camera.matrixWorldInverse.copy( camera.matrixWorld ).invert();
}
/**
* Updates the given camera with the transformation of the XR camera and parent object.
*
* @inner
* @param {Camera} camera - The camera to update.
* @param {ArrayCamera} cameraXR - The XR camera.
* @param {Object3D} parent - The parent 3D object.
*/
function updateUserCamera( camera, cameraXR, parent ) {
const matrix = camera.matrix;
if ( parent !== null ) {
// express the XR pose in the parent's local space
matrix.copy( parent.matrixWorld ).invert().multiply( cameraXR.matrixWorld );
} else {
matrix.copy( cameraXR.matrixWorld );
}
matrix.decompose( camera.position, camera.quaternion, camera.scale );
camera.updateMatrixWorld( true );
camera.projectionMatrix.copy( cameraXR.projectionMatrix );
camera.projectionMatrixInverse.copy( cameraXR.projectionMatrixInverse );
if ( camera.isPerspectiveCamera ) {
// keep fov/zoom consistent with the copied projection matrix
camera.fov = RAD2DEG * 2 * Math.atan( 1 / camera.projectionMatrix.elements[ 5 ] );
camera.zoom = 1;
}
}
/**
* Event listener for XR session input events (select/squeeze and their
* start/end variants). Routes the event to the controller that owns the
* event's input source, updating its pose first.
*
* @param {XRInputSourceEvent} event - The event object.
*/
function onSessionEvent( event ) {
const index = this._controllerInputSources.indexOf( event.inputSource );
if ( index === - 1 ) return;
const controller = this._controllers[ index ];
if ( controller === undefined ) return;
const referenceSpace = this.getReferenceSpace();
controller.update( event.inputSource, event.frame, referenceSpace );
controller.dispatchEvent( { type: event.type, data: event.inputSource } );
}
/**
* Event listener for the `end` event of the current XR session. Removes all
* session event listeners, disconnects the controllers and restores the
* renderer state that was captured in `setSession()` (render target, pixel
* ratio, size and the regular animation loop).
*/
function onSessionEnd() {
const session = this._session;
const renderer = this._renderer;
// remove the listeners registered in setSession()
session.removeEventListener( 'select', this._onSessionEvent );
session.removeEventListener( 'selectstart', this._onSessionEvent );
session.removeEventListener( 'selectend', this._onSessionEvent );
session.removeEventListener( 'squeeze', this._onSessionEvent );
session.removeEventListener( 'squeezestart', this._onSessionEvent );
session.removeEventListener( 'squeezeend', this._onSessionEvent );
session.removeEventListener( 'end', this._onSessionEnd );
session.removeEventListener( 'inputsourceschange', this._onInputSourcesChange );
// detach all input sources from their controllers
for ( let i = 0; i < this._controllers.length; i ++ ) {
const inputSource = this._controllerInputSources[ i ];
if ( inputSource === null ) continue;
this._controllerInputSources[ i ] = null;
this._controllers[ i ].disconnect( inputSource );
}
// reset cached near/far so updateCamera() re-syncs them on the next session
this._currentDepthNear = null;
this._currentDepthFar = null;
// restore framebuffer/rendering state
renderer.backend.setXRTarget( null );
renderer.setRenderTarget( this._currentRenderTarget );
this._session = null;
this._xrRenderTarget = null;
//
this.isPresenting = false;
// hand the animation loop back to the regular (non-XR) context
renderer._animation.stop();
renderer._animation.setAnimationLoop( this._currentAnimationLoop );
renderer._animation.setContext( this._currentAnimationContext );
renderer._animation.start();
renderer.setPixelRatio( this._currentPixelRatio );
renderer.setSize( this._currentSize.width, this._currentSize.height, false );
this.dispatchEvent( { type: 'sessionend' } );
}
/**
* Event listener for the `inputsourceschange` event of the current XR
* session. Detaches removed input sources from their controllers and
* assigns newly added input sources to free controller slots.
*
* @param {XRInputSourcesChangeEvent} event - The event object.
*/
function onInputSourcesChange( event ) {
const controllers = this._controllers;
const controllerInputSources = this._controllerInputSources;
// Notify disconnected
for ( let i = 0; i < event.removed.length; i ++ ) {
const inputSource = event.removed[ i ];
const index = controllerInputSources.indexOf( inputSource );
if ( index >= 0 ) {
controllerInputSources[ index ] = null;
controllers[ index ].disconnect( inputSource );
}
}
// Notify connected
for ( let i = 0; i < event.added.length; i ++ ) {
const inputSource = event.added[ i ];
let controllerIndex = controllerInputSources.indexOf( inputSource );
if ( controllerIndex === - 1 ) {
// Assign input source a controller that currently has no input source.
// The inner index is named `j` to avoid shadowing the outer loop's `i`.
for ( let j = 0; j < controllers.length; j ++ ) {
if ( j >= controllerInputSources.length ) {
controllerInputSources.push( inputSource );
controllerIndex = j;
break;
} else if ( controllerInputSources[ j ] === null ) {
controllerInputSources[ j ] = inputSource;
controllerIndex = j;
break;
}
}
// If all controllers do currently receive input we ignore new ones
if ( controllerIndex === - 1 ) break;
}
const controller = controllers[ controllerIndex ];
if ( controller ) {
controller.connect( inputSource );
}
}
}
/**
* The XR animation loop callback. Invoked per frame by the XR session with
* the current time and XR frame. Updates the per-view cameras from the
* viewer pose, binds the XR framebuffer/textures, refreshes the controllers
* and finally executes the application's animation loop.
*
* @param {Number} time - The timestamp.
* @param {XRFrame} frame - The XR frame.
*/
function onAnimationFrame( time, frame ) {
if ( frame === undefined ) return;
const cameraXR = this._cameraXR;
const renderer = this._renderer;
const backend = renderer.backend;
const glBaseLayer = this._glBaseLayer;
const referenceSpace = this.getReferenceSpace();
const pose = frame.getViewerPose( referenceSpace );
// make the frame available via getFrame() for the duration of this callback
this._xrFrame = frame;
if ( pose !== null ) {
const views = pose.views;
// fallback XRWebGLLayer path: bind the layer's framebuffer directly
if ( this._glBaseLayer !== null ) {
backend.setXRTarget( glBaseLayer.framebuffer );
}
let cameraXRNeedsUpdate = false;
// check if it's necessary to rebuild cameraXR's camera list
if ( views.length !== cameraXR.cameras.length ) {
cameraXR.cameras.length = 0;
cameraXRNeedsUpdate = true;
}
for ( let i = 0; i < views.length; i ++ ) {
const view = views[ i ];
let viewport;
if ( this._useLayers === true ) {
const glSubImage = this._glBinding.getViewSubImage( this._glProjLayer, view );
viewport = glSubImage.viewport;
// For side-by-side projection, we only produce a single texture for both eyes.
if ( i === 0 ) {
backend.setXRRenderTargetTextures(
this._xrRenderTarget,
glSubImage.colorTexture,
this._glProjLayer.ignoreDepthValues ? undefined : glSubImage.depthStencilTexture
);
}
} else {
viewport = glBaseLayer.getViewport( view );
}
// lazily create one sub camera per view
let camera = this._cameras[ i ];
if ( camera === undefined ) {
camera = new PerspectiveCamera();
camera.layers.enable( i );
camera.viewport = new Vector4();
this._cameras[ i ] = camera;
}
// copy pose, projection and viewport of this view into the sub camera
camera.matrix.fromArray( view.transform.matrix );
camera.matrix.decompose( camera.position, camera.quaternion, camera.scale );
camera.projectionMatrix.fromArray( view.projectionMatrix );
camera.projectionMatrixInverse.copy( camera.projectionMatrix ).invert();
camera.viewport.set( viewport.x, viewport.y, viewport.width, viewport.height );
// the first view's pose defines the array camera's transformation
if ( i === 0 ) {
cameraXR.matrix.copy( camera.matrix );
cameraXR.matrix.decompose( cameraXR.position, cameraXR.quaternion, cameraXR.scale );
}
if ( cameraXRNeedsUpdate === true ) {
cameraXR.cameras.push( camera );
}
}
renderer.setRenderTarget( this._xrRenderTarget );
}
//
// update controller poses/input state for this frame
for ( let i = 0; i < this._controllers.length; i ++ ) {
const inputSource = this._controllerInputSources[ i ];
const controller = this._controllers[ i ];
if ( inputSource !== null && controller !== undefined ) {
controller.update( inputSource, frame, referenceSpace );
}
}
// run the application's animation loop
if ( this._currentAnimationLoop ) this._currentAnimationLoop( time, frame );
if ( frame.detectedPlanes ) {
this.dispatchEvent( { type: 'planesdetected', data: frame } );
}
// the frame object is only valid during this callback (see getFrame())
this._xrFrame = null;
}
/** @module Renderer **/
// Module-scoped scratch objects, shared by all Renderer instances and reused
// across frames to avoid per-call allocations in the render loop.
const _scene = /*@__PURE__*/ new Scene();
const _drawingBufferSize = /*@__PURE__*/ new Vector2();
const _screen = /*@__PURE__*/ new Vector4();
const _frustum = /*@__PURE__*/ new Frustum();
const _projScreenMatrix = /*@__PURE__*/ new Matrix4();
const _vector4 = /*@__PURE__*/ new Vector4();
/**
* Base class for renderers.
*/
class Renderer {
/**
* Constructs a new renderer.
*
* @param {Backend} backend - The backend the renderer is targeting (e.g. WebGPU or WebGL 2).
* @param {Object} parameters - The configuration parameter.
* @param {Boolean} [parameters.logarithmicDepthBuffer=false] - Whether logarithmic depth buffer is enabled or not.
* @param {Boolean} [parameters.alpha=true] - Whether the default framebuffer (which represents the final contents of the canvas) should be transparent or opaque.
* @param {Boolean} [parameters.depth=true] - Whether the default framebuffer should have a depth buffer or not.
* @param {Boolean} [parameters.stencil=false] - Whether the default framebuffer should have a stencil buffer or not.
* @param {Boolean} [parameters.antialias=false] - Whether MSAA as the default anti-aliasing should be enabled or not.
* @param {Number} [parameters.samples=0] - When `antialias` is `true`, `4` samples are used by default. This parameter can set to any other integer value than 0
* to overwrite the default.
* @param {Function?} [parameters.getFallback=null] - This callback function can be used to provide a fallback backend, if the primary backend can't be targeted.
* @param {Number} [parameters.colorBufferType=HalfFloatType] - Defines the type of color buffers. The default `HalfFloatType` is recommend for best
* quality. To save memory and bandwidth, `UnsignedByteType` might be used. This will reduce rendering quality though.
*/
constructor( backend, parameters = {} ) {
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isRenderer = true;
//
const {
logarithmicDepthBuffer = false,
alpha = true,
depth = true,
stencil = false,
antialias = false,
samples = 0,
getFallback = null,
colorBufferType = HalfFloatType
} = parameters;
/**
* A reference to the canvas element the renderer is drawing to.
* This value of this property will automatically be created by
* the renderer.
*
* @type {HTMLCanvasElement|OffscreenCanvas}
*/
this.domElement = backend.getDomElement();
/**
* A reference to the current backend.
*
* @type {Backend}
*/
this.backend = backend;
/**
* The number of MSAA samples.
*
* @type {Number}
* @default 0
*/
this.samples = samples || ( antialias === true ) ? 4 : 0;
/**
* Whether the renderer should automatically clear the current rendering target
* before execute a `render()` call. The target can be the canvas (default framebuffer)
* or the current bound render target (custom framebuffer).
*
* @type {Boolean}
* @default true
*/
this.autoClear = true;
/**
* When `autoClear` is set to `true`, this property defines whether the renderer
* should clear the color buffer.
*
* @type {Boolean}
* @default true
*/
this.autoClearColor = true;
/**
* When `autoClear` is set to `true`, this property defines whether the renderer
* should clear the depth buffer.
*
* @type {Boolean}
* @default true
*/
this.autoClearDepth = true;
/**
* When `autoClear` is set to `true`, this property defines whether the renderer
* should clear the stencil buffer.
*
* @type {Boolean}
* @default true
*/
this.autoClearStencil = true;
/**
* Whether the default framebuffer should be transparent or opaque.
*
* @type {Boolean}
* @default true
*/
this.alpha = alpha;
/**
* Whether logarithmic depth buffer is enabled or not.
*
* @type {Boolean}
* @default false
*/
this.logarithmicDepthBuffer = logarithmicDepthBuffer;
/**
* Defines the output color space of the renderer.
*
* @type {String}
* @default SRGBColorSpace
*/
this.outputColorSpace = SRGBColorSpace;
/**
* Defines the tone mapping of the renderer.
*
* @type {Number}
* @default NoToneMapping
*/
this.toneMapping = NoToneMapping;
/**
* Defines the tone mapping exposure.
*
* @type {Number}
* @default 1
*/
this.toneMappingExposure = 1.0;
/**
* Whether the renderer should sort its render lists or not.
*
* Note: Sorting is used to attempt to properly render objects that have some degree of transparency.
* By definition, sorting objects may not work in all cases. Depending on the needs of application,
* it may be necessary to turn off sorting and use other methods to deal with transparency rendering
* e.g. manually determining each object's rendering order.
*
* @type {Boolean}
* @default true
*/
this.sortObjects = true;
/**
* Whether the default framebuffer should have a depth buffer or not.
*
* @type {Boolean}
* @default true
*/
this.depth = depth;
/**
* Whether the default framebuffer should have a stencil buffer or not.
*
* @type {Boolean}
* @default false
*/
this.stencil = stencil;
/**
* Holds a series of statistical information about the GPU memory
* and the rendering process. Useful for debugging and monitoring.
*
* @type {Info}
*/
this.info = new Info();
this.nodes = {
modelViewMatrix: null,
modelNormalViewMatrix: null
};
/**
* The node library defines how certain library objects like materials, lights
* or tone mapping functions are mapped to node types. This is required since
* although instances of classes like `MeshBasicMaterial` or `PointLight` can
* be part of the scene graph, they are internally represented as nodes for
* further processing.
*
* @type {NodeLibrary}
*/
this.library = new NodeLibrary();
/**
* A map-like data structure for managing lights.
*
* @type {Lighting}
*/
this.lighting = new Lighting();
// internals
/**
* This callback function can be used to provide a fallback backend, if the primary backend can't be targeted.
*
* @private
* @type {Function}
*/
this._getFallback = getFallback;
/**
* The renderer's pixel ration.
*
* @private
* @type {Number}
* @default 1
*/
this._pixelRatio = 1;
/**
* The width of the renderer's default framebuffer in logical pixel unit.
*
* @private
* @type {Number}
*/
this._width = this.domElement.width;
/**
* The height of the renderer's default framebuffer in logical pixel unit.
*
* @private
* @type {Number}
*/
this._height = this.domElement.height;
/**
* The viewport of the renderer in logical pixel unit.
*
* @private
* @type {Vector4}
*/
this._viewport = new Vector4( 0, 0, this._width, this._height );
/**
* The scissor rectangle of the renderer in logical pixel unit.
*
* @private
* @type {Vector4}
*/
this._scissor = new Vector4( 0, 0, this._width, this._height );
/**
* Whether the scissor test should be enabled or not.
*
* @private
* @type {Boolean}
*/
this._scissorTest = false;
/**
* A reference to a renderer module for managing shader attributes.
*
* @private
* @type {Attributes?}
* @default null
*/
this._attributes = null;
/**
* A reference to a renderer module for managing geometries.
*
* @private
* @type {Geometries?}
* @default null
*/
this._geometries = null;
/**
* A reference to a renderer module for managing node related logic.
*
* @private
* @type {Nodes?}
* @default null
*/
this._nodes = null;
/**
* A reference to a renderer module for managing the internal animation loop.
*
* @private
* @type {Animation?}
* @default null
*/
this._animation = null;
/**
* A reference to a renderer module for managing shader program bindings.
*
* @private
* @type {Bindings?}
* @default null
*/
this._bindings = null;
/**
* A reference to a renderer module for managing render objects.
*
* @private
* @type {RenderObjects?}
* @default null
*/
this._objects = null;
/**
* A reference to a renderer module for managing render and compute pipelines.
*
* @private
* @type {Pipelines?}
* @default null
*/
this._pipelines = null;
/**
* A reference to a renderer module for managing render bundles.
*
* @private
* @type {RenderBundles?}
* @default null
*/
this._bundles = null;
/**
* A reference to a renderer module for managing render lists.
*
* @private
* @type {RenderLists?}
* @default null
*/
this._renderLists = null;
/**
* A reference to a renderer module for managing render contexts.
*
* @private
* @type {RenderContexts?}
* @default null
*/
this._renderContexts = null;
/**
* A reference to a renderer module for managing textures.
*
* @private
* @type {Textures?}
* @default null
*/
this._textures = null;
/**
* A reference to a renderer module for backgrounds.
*
* @private
* @type {Background?}
* @default null
*/
this._background = null;
/**
* This fullscreen quad is used for internal render passes
* like the tone mapping and color space output pass.
*
* @private
* @type {QuadMesh}
*/
this._quad = new QuadMesh( new NodeMaterial() );
this._quad.material.name = 'Renderer_output';
/**
* A reference to the current render context.
*
* @private
* @type {RenderContext?}
* @default null
*/
this._currentRenderContext = null;
/**
* A custom sort function for the opaque render list.
*
* @private
* @type {Function?}
* @default null
*/
this._opaqueSort = null;
/**
* A custom sort function for the transparent render list.
*
* @private
* @type {Function?}
* @default null
*/
this._transparentSort = null;
/**
* The framebuffer target.
*
* @private
* @type {RenderTarget?}
* @default null
*/
this._frameBufferTarget = null;
const alphaClear = this.alpha === true ? 0 : 1;
/**
* The clear color value.
*
* @private
* @type {Color4}
*/
this._clearColor = new Color4( 0, 0, 0, alphaClear );
/**
* The clear depth value.
*
* @private
* @type {Number}
* @default 1
*/
this._clearDepth = 1;
/**
* The clear stencil value.
*
* @private
* @type {Number}
* @default 0
*/
this._clearStencil = 0;
/**
* The current render target.
*
* @private
* @type {RenderTarget?}
* @default null
*/
this._renderTarget = null;
/**
* The active cube face.
*
* @private
* @type {Number}
* @default 0
*/
this._activeCubeFace = 0;
/**
* The active mipmap level.
*
* @private
* @type {Number}
* @default 0
*/
this._activeMipmapLevel = 0;
/**
* The MRT setting.
*
* @private
* @type {MRTNode?}
* @default null
*/
this._mrt = null;
/**
* This function defines how a render object is going
* to be rendered.
*
* @private
* @type {Function?}
* @default null
*/
this._renderObjectFunction = null;
/**
* Used to keep track of the current render object function.
*
* @private
* @type {Function?}
* @default null
*/
this._currentRenderObjectFunction = null;
/**
* Used to keep track of the current render bundle.
*
* @private
* @type {RenderBundle?}
* @default null
*/
this._currentRenderBundle = null;
/**
* Next to `_renderObjectFunction()`, this function provides another hook
* for influencing the render process of a render object. It is meant for internal
* use and only relevant for `compileAsync()` right now. Instead of using
* the default logic of `_renderObjectDirect()` which actually draws the render object,
* a different function might be used which performs no draw but just the node
* and pipeline updates.
*
* @private
* @type {Function?}
* @default null
*/
this._handleObjectFunction = this._renderObjectDirect;
/**
* Indicates whether the device has been lost or not. In WebGL terms, the device
* lost is considered as a context lost. When this is set to `true`, rendering
* isn't possible anymore.
*
* @private
* @type {Boolean}
* @default false
*/
this._isDeviceLost = false;
/**
* A callback function that defines what should happen when a device/context lost occurs.
*
* @type {Function}
*/
this.onDeviceLost = this._onDeviceLost;
/**
* Defines the type of color buffers. The default `HalfFloatType` is recommend for
* best quality. To save memory and bandwidth, `UnsignedByteType` might be used.
* This will reduce rendering quality though.
*
* @private
* @type {Number}
* @default HalfFloatType
*/
this._colorBufferType = colorBufferType;
/**
* Whether the renderer has been initialized or not.
*
* @private
* @type {Boolean}
* @default false
*/
this._initialized = false;
/**
* A reference to the promise which initializes the renderer.
*
* @private
* @type {Promise?}
* @default null
*/
this._initPromise = null;
/**
* An array of compilation promises which are used in `compileAsync()`.
*
* @private
* @type {Array<Promise>?}
* @default null
*/
this._compilationPromises = null;
/**
* Whether the renderer should render transparent render objects or not.
*
* @type {Boolean}
* @default true
*/
this.transparent = true;
/**
* Whether the renderer should render opaque render objects or not.
*
* @type {Boolean}
* @default true
*/
this.opaque = true;
/**
* Shadow map configuration
* @typedef {Object} ShadowMapConfig
* @property {Boolean} enabled - Whether to globally enable shadows or not.
* @property {Number} type - The shadow map type.
*/
/**
* The renderer's shadow configuration.
*
* @type {module:Renderer~ShadowMapConfig}
*/
this.shadowMap = {
enabled: false,
type: PCFShadowMap
};
/**
* XR configuration.
* @typedef {Object} XRConfig
* @property {Boolean} enabled - Whether to globally enable XR or not.
*/
/**
* The renderer's XR manager.
*
* @type {XRManager}
*/
this.xr = new XRManager( this );
/**
* Debug configuration.
* @typedef {Object} DebugConfig
* @property {Boolean} checkShaderErrors - Whether shader errors should be checked or not.
* @property {Function} onShaderError - A callback function that is executed when a shader error happens. Only supported with WebGL 2 right now.
* @property {Function} getShaderAsync - Allows the get the raw shader code for the given scene, camera and 3D object.
*/
/**
* The renderer's debug configuration.
*
* @type {module:Renderer~DebugConfig}
*/
this.debug = {
checkShaderErrors: true,
onShaderError: null,
getShaderAsync: async ( scene, camera, object ) => {
await this.compileAsync( scene, camera );
const renderList = this._renderLists.get( scene, camera );
const renderContext = this._renderContexts.get( scene, camera, this._renderTarget );
const material = scene.overrideMaterial || object.material;
const renderObject = this._objects.get( object, material, scene, camera, renderList.lightsNode, renderContext, renderContext.clippingContext );
const { fragmentShader, vertexShader } = renderObject.getNodeBuilderState();
return { fragmentShader, vertexShader };
}
};
}
/**
* Initializes the renderer so it is ready for usage.
*
* @async
* @return {Promise} A Promise that resolves when the renderer has been initialized.
*/
async init() {
if ( this._initialized ) {
throw new Error( 'Renderer: Backend has already been initialized.' );
}
if ( this._initPromise !== null ) {
return this._initPromise;
}
this._initPromise = new Promise( async ( resolve, reject ) => {
let backend = this.backend;
try {
await backend.init( this );
} catch ( error ) {
if ( this._getFallback !== null ) {
// try the fallback
try {
this.backend = backend = this._getFallback( error );
await backend.init( this );
} catch ( error ) {
reject( error );
return;
}
} else {
reject( error );
return;
}
}
this._nodes = new Nodes( this, backend );
this._animation = new Animation( this._nodes, this.info );
this._attributes = new Attributes( backend );
this._background = new Background( this, this._nodes );
this._geometries = new Geometries( this._attributes, this.info );
this._textures = new Textures( this, backend, this.info );
this._pipelines = new Pipelines( backend, this._nodes );
this._bindings = new Bindings( backend, this._nodes, this._textures, this._attributes, this._pipelines, this.info );
this._objects = new RenderObjects( this, this._nodes, this._geometries, this._pipelines, this._bindings, this.info );
this._renderLists = new RenderLists( this.lighting );
this._bundles = new RenderBundles();
this._renderContexts = new RenderContexts();
//
this._animation.start();
this._initialized = true;
resolve();
} );
return this._initPromise;
}
/**
* The coordinate system of the renderer. The value of this property
* depends on the selected backend. Either `THREE.WebGLCoordinateSystem` or
* `THREE.WebGPUCoordinateSystem`.
*
* @readonly
* @type {Number}
*/
get coordinateSystem() {
return this.backend.coordinateSystem;
}
/**
* Compiles all materials in the given scene. This can be useful to avoid a
* phenomenon which is called "shader compilation stutter", which occurs when
* rendering an object with a new shader for the first time.
*
* If you want to add a 3D object to an existing scene, use the third optional
* parameter for applying the target scene. Note that the (target) scene's lighting
* and environment must be configured before calling this method.
*
* @async
* @param {Object3D} scene - The scene or 3D object to precompile.
* @param {Camera} camera - The camera that is used to render the scene.
* @param {Scene} targetScene - If the first argument is a 3D object, this parameter must represent the scene the 3D object is going to be added.
* @return {Promise<Array>} A Promise that resolves when the compile has been finished.
*/
async compileAsync( scene, camera, targetScene = null ) {
// Bail out on a lost device; lazily initialize the backend on first use.
if ( this._isDeviceLost === true ) return;
if ( this._initialized === false ) await this.init();
// preserve render tree state so a real render in flight is not disturbed
const nodeFrame = this._nodes.nodeFrame;
const previousRenderId = nodeFrame.renderId;
const previousRenderContext = this._currentRenderContext;
const previousRenderObjectFunction = this._currentRenderObjectFunction;
const previousCompilationPromises = this._compilationPromises;
// when a bare Object3D is passed, wrap it in the module-level helper scene
const sceneRef = ( scene.isScene === true ) ? scene : _scene;
if ( targetScene === null ) targetScene = scene;
const renderTarget = this._renderTarget;
const renderContext = this._renderContexts.get( targetScene, camera, renderTarget );
const activeMipmapLevel = this._activeMipmapLevel;
const compilationPromises = [];
// route object handling through pipeline creation instead of direct rendering
this._currentRenderContext = renderContext;
this._currentRenderObjectFunction = this.renderObject;
this._handleObjectFunction = this._createObjectPipeline;
this._compilationPromises = compilationPromises;
nodeFrame.renderId ++;
// advance node state for this pseudo-frame
nodeFrame.update();
// configure depth/stencil and clipping for the compile pass
renderContext.depth = this.depth;
renderContext.stencil = this.stencil;
if ( ! renderContext.clippingContext ) renderContext.clippingContext = new ClippingContext();
renderContext.clippingContext.updateGlobal( sceneRef, camera );
// user callback before any render list is built
sceneRef.onBeforeRender( this, scene, camera, renderTarget );
// collect renderable objects (no sorting needed for compilation)
const renderList = this._renderLists.get( scene, camera );
renderList.begin();
this._projectObject( scene, camera, 0, renderList, renderContext.clippingContext );
// include lights from target scene so compiled shaders match its lighting
if ( targetScene !== scene ) {
targetScene.traverseVisible( function ( object ) {
if ( object.isLight && object.layers.test( camera.layers ) ) {
renderList.pushLight( object );
}
} );
}
renderList.finish();
// wire up render target textures (or clear them for the default framebuffer)
if ( renderTarget !== null ) {
this._textures.updateRenderTarget( renderTarget, activeMipmapLevel );
const renderTargetData = this._textures.get( renderTarget );
renderContext.textures = renderTargetData.textures;
renderContext.depthTexture = renderTargetData.depthTexture;
} else {
renderContext.textures = null;
renderContext.depthTexture = null;
}
// background may contribute its own render object/material
this._background.update( sceneRef, renderList, renderContext );
// process render lists; this triggers pipeline creation via _createObjectPipeline
const opaqueObjects = renderList.opaque;
const transparentObjects = renderList.transparent;
const transparentDoublePassObjects = renderList.transparentDoublePass;
const lightsNode = renderList.lightsNode;
if ( this.opaque === true && opaqueObjects.length > 0 ) this._renderObjects( opaqueObjects, camera, sceneRef, lightsNode );
if ( this.transparent === true && transparentObjects.length > 0 ) this._renderTransparents( transparentObjects, transparentDoublePassObjects, camera, sceneRef, lightsNode );
// restore render tree state captured above
nodeFrame.renderId = previousRenderId;
this._currentRenderContext = previousRenderContext;
this._currentRenderObjectFunction = previousRenderObjectFunction;
this._compilationPromises = previousCompilationPromises;
this._handleObjectFunction = this._renderObjectDirect;
// wait for all promises setup by backends awaiting compilation/linking/pipeline creation to complete
await Promise.all( compilationPromises );
}
/**
* Renders the scene in an async fashion.
*
* @async
* @param {Object3D} scene - The scene or 3D object to render.
* @param {Camera} camera - The camera.
* @return {Promise} A Promise that resolves when the render has been finished.
*/
async renderAsync( scene, camera ) {
if ( this._initialized === false ) await this.init();
this._renderScene( scene, camera );
}
/**
* Can be used to synchronize CPU operations with GPU tasks. So when this method is called,
* the CPU waits for the GPU to complete its operation (e.g. a compute task).
*
* @async
* @return {Promise} A Promise that resolves when synchronization has been finished.
*/
async waitForGPU() {
// Pure delegation; the backend implements the actual fence/sync primitive.
await this.backend.waitForGPU();
}
/**
* Sets the given MRT (multiple render targets) configuration.
*
* @param {MRTNode} mrt - The MRT node to set. Replaces any previously set configuration.
* @return {Renderer} A reference to this renderer.
*/
setMRT( mrt ) {
this._mrt = mrt;
return this; // chainable
}
/**
* Returns the MRT configuration previously set via `setMRT()`.
*
* @return {MRTNode} The MRT configuration.
*/
getMRT() {
return this._mrt;
}
/**
* Returns the color buffer type. This type is also used for the internal
* framebuffer target created by `_getFrameBufferTarget()`.
*
* @return {Number} The color buffer type.
*/
getColorBufferType() {
return this._colorBufferType;
}
/**
* Default implementation of the device lost callback.
*
* @private
* @param {Object} info - Information about the context lost.
*/
_onDeviceLost( info ) {
let errorMessage = `THREE.WebGPURenderer: ${info.api} Device Lost:\n\nMessage: ${info.message}`;
if ( info.reason ) {
errorMessage += `\nReason: ${info.reason}`;
}
console.error( errorMessage );
this._isDeviceLost = true;
}
/**
* Renders the given render bundle. A bundle is (re)recorded when its group's
* version changed or when it has never been used with the current render
* context; otherwise only per-object node/binding updates are performed.
*
* @private
* @param {Object} bundle - Render bundle data.
* @param {Scene} sceneRef - The scene the render bundle belongs to.
* @param {LightsNode} lightsNode - The current lights node.
*/
_renderBundle( bundle, sceneRef, lightsNode ) {
const { bundleGroup, camera, renderList } = bundle;
const renderContext = this._currentRenderContext;
// fetch (or create) the backend-side bundle and its cached state
const renderBundle = this._bundles.get( bundleGroup, camera );
const renderBundleData = this.backend.get( renderBundle );
if ( renderBundleData.renderContexts === undefined ) renderBundleData.renderContexts = new Set();
// a bundle must be re-recorded when its group version changed or it was
// never recorded for this render context
const needsUpdate = bundleGroup.version !== renderBundleData.version;
const renderBundleNeedsUpdate = renderBundleData.renderContexts.has( renderContext ) === false || needsUpdate;
renderBundleData.renderContexts.add( renderContext );
if ( renderBundleNeedsUpdate ) {
this.backend.beginBundle( renderContext );
if ( renderBundleData.renderObjects === undefined || needsUpdate ) {
renderBundleData.renderObjects = [];
}
// _currentRenderBundle signals downstream code to record into the bundle
this._currentRenderBundle = renderBundle;
const opaqueObjects = renderList.opaque;
if ( this.opaque === true && opaqueObjects.length > 0 ) this._renderObjects( opaqueObjects, camera, sceneRef, lightsNode );
this._currentRenderBundle = null;
// finalize the recording and remember the version it was recorded at
this.backend.finishBundle( renderContext, renderBundle );
renderBundleData.version = bundleGroup.version;
} else {
// bundle is reusable: only refresh nodes/bindings of stale objects
const { renderObjects } = renderBundleData;
for ( let i = 0, l = renderObjects.length; i < l; i ++ ) {
const renderObject = renderObjects[ i ];
if ( this._nodes.needsRefresh( renderObject ) ) {
this._nodes.updateBefore( renderObject );
this._nodes.updateForRender( renderObject );
this._bindings.updateForRender( renderObject );
this._nodes.updateAfter( renderObject );
}
}
}
// submit the (re)recorded or cached bundle for this pass
this.backend.addBundle( renderContext, renderBundle );
}
/**
* Renders the scene or 3D object with the given camera. This method can only be called
* if the renderer has been initialized.
*
* The target of the method is the default framebuffer (meaning the canvas)
* or alternatively a render target when specified via `setRenderTarget()`.
*
* @param {Object3D} scene - The scene or 3D object to render.
* @param {Camera} camera - The camera to render the scene with.
* @return {Promise?} A Promise that resolve when the scene has been rendered.
* Only returned when the renderer has not been initialized.
*/
render( scene, camera ) {
if ( this._initialized === false ) {
console.warn( 'THREE.Renderer: .render() called before the backend is initialized. Try using .renderAsync() instead.' );
return this.renderAsync( scene, camera );
}
this._renderScene( scene, camera );
}
/**
* Returns an internal render target which is used when computing the output tone mapping
* and color space conversion. Unlike in `WebGLRenderer`, this is done in a separate render
* pass and not inline to achieve more correct results.
*
* The target is created lazily on first use and then resynchronized with the
* renderer's current size/viewport/scissor state on every call.
*
* @private
* @return {RenderTarget?} The render target. The method returns `null` if no output conversion should be applied.
*/
_getFrameBufferTarget() {
const { currentToneMapping, currentColorSpace } = this;
const useToneMapping = currentToneMapping !== NoToneMapping;
const useColorSpace = currentColorSpace !== LinearSRGBColorSpace;
// no tone mapping and no color space conversion -> render directly, no extra pass
if ( useToneMapping === false && useColorSpace === false ) return null;
const { width, height } = this.getDrawingBufferSize( _drawingBufferSize );
const { depth, stencil } = this;
let frameBufferTarget = this._frameBufferTarget;
// lazy creation; the target is cached and reused across frames
if ( frameBufferTarget === null ) {
frameBufferTarget = new RenderTarget( width, height, {
depthBuffer: depth,
stencilBuffer: stencil,
type: this._colorBufferType,
format: RGBAFormat,
colorSpace: LinearSRGBColorSpace,
generateMipmaps: false,
minFilter: LinearFilter,
magFilter: LinearFilter,
samples: this.samples
} );
frameBufferTarget.isPostProcessingRenderTarget = true;
this._frameBufferTarget = frameBufferTarget;
}
// keep the cached target in sync with the renderer's current state
frameBufferTarget.depthBuffer = depth;
frameBufferTarget.stencilBuffer = stencil;
frameBufferTarget.setSize( width, height );
// viewport/scissor are stored in logical pixels; scale to physical pixels
frameBufferTarget.viewport.copy( this._viewport );
frameBufferTarget.scissor.copy( this._scissor );
frameBufferTarget.viewport.multiplyScalar( this._pixelRatio );
frameBufferTarget.scissor.multiplyScalar( this._pixelRatio );
frameBufferTarget.scissorTest = this._scissorTest;
return frameBufferTarget;
}
/**
* Renders the scene or 3D object with the given camera.
*
* When an output conversion (tone mapping/color space) is required, the scene
* is first rendered into an internal framebuffer target and then blitted to
* the actual output via a fullscreen quad (recursive call with
* `useFrameBufferTarget = false`).
*
* @private
* @param {Object3D} scene - The scene or 3D object to render.
* @param {Camera} camera - The camera to render the scene with.
* @param {Boolean} [useFrameBufferTarget=true] - Whether to use a framebuffer target or not.
* @return {RenderContext} The current render context.
*/
_renderScene( scene, camera, useFrameBufferTarget = true ) {
if ( this._isDeviceLost === true ) return;
const frameBufferTarget = useFrameBufferTarget ? this._getFrameBufferTarget() : null;
// preserve render tree state (allows nested _renderScene calls)
const nodeFrame = this._nodes.nodeFrame;
const previousRenderId = nodeFrame.renderId;
const previousRenderContext = this._currentRenderContext;
const previousRenderObjectFunction = this._currentRenderObjectFunction;
// when a bare Object3D is passed, use the module-level helper scene
const sceneRef = ( scene.isScene === true ) ? scene : _scene;
const outputRenderTarget = this._renderTarget;
const activeCubeFace = this._activeCubeFace;
const activeMipmapLevel = this._activeMipmapLevel;
// render into the intermediate framebuffer target when output conversion is needed
let renderTarget;
if ( frameBufferTarget !== null ) {
renderTarget = frameBufferTarget;
this.setRenderTarget( renderTarget );
} else {
renderTarget = outputRenderTarget;
}
// resolve the render context for this (scene, camera, target) combination
const renderContext = this._renderContexts.get( scene, camera, renderTarget );
this._currentRenderContext = renderContext;
this._currentRenderObjectFunction = this._renderObjectFunction || this.renderObject;
// bookkeeping; the global call counter doubles as the node frame's render id
this.info.calls ++;
this.info.render.calls ++;
this.info.render.frameCalls ++;
nodeFrame.renderId = this.info.calls;
// align the camera's coordinate system with the backend (not while presenting in XR)
const coordinateSystem = this.coordinateSystem;
const xr = this.xr;
if ( camera.coordinateSystem !== coordinateSystem && xr.isPresenting === false ) {
camera.coordinateSystem = coordinateSystem;
camera.updateProjectionMatrix();
if ( camera.isArrayCamera ) {
for ( const subCamera of camera.cameras ) {
subCamera.coordinateSystem = coordinateSystem;
subCamera.updateProjectionMatrix();
}
}
}
// update world matrices unless auto-update is disabled
if ( scene.matrixWorldAutoUpdate === true ) scene.updateMatrixWorld();
if ( camera.parent === null && camera.matrixWorldAutoUpdate === true ) camera.updateMatrixWorld();
if ( xr.enabled === true && xr.isPresenting === true ) {
if ( xr.cameraAutoUpdate === true ) xr.updateCamera( camera );
camera = xr.getCamera(); // use XR camera for rendering
}
// render targets carry their own viewport/scissor and are already in physical pixels
let viewport = this._viewport;
let scissor = this._scissor;
let pixelRatio = this._pixelRatio;
if ( renderTarget !== null ) {
viewport = renderTarget.viewport;
scissor = renderTarget.scissor;
pixelRatio = 1;
}
this.getDrawingBufferSize( _drawingBufferSize );
_screen.set( 0, 0, _drawingBufferSize.width, _drawingBufferSize.height );
const minDepth = ( viewport.minDepth === undefined ) ? 0 : viewport.minDepth;
const maxDepth = ( viewport.maxDepth === undefined ) ? 1 : viewport.maxDepth;
// viewport/scissor flags are only set when they differ from the full screen rect
renderContext.viewportValue.copy( viewport ).multiplyScalar( pixelRatio ).floor();
renderContext.viewportValue.width >>= activeMipmapLevel;
renderContext.viewportValue.height >>= activeMipmapLevel;
renderContext.viewportValue.minDepth = minDepth;
renderContext.viewportValue.maxDepth = maxDepth;
renderContext.viewport = renderContext.viewportValue.equals( _screen ) === false;
renderContext.scissorValue.copy( scissor ).multiplyScalar( pixelRatio ).floor();
renderContext.scissor = this._scissorTest && renderContext.scissorValue.equals( _screen ) === false;
renderContext.scissorValue.width >>= activeMipmapLevel;
renderContext.scissorValue.height >>= activeMipmapLevel;
if ( ! renderContext.clippingContext ) renderContext.clippingContext = new ClippingContext();
renderContext.clippingContext.updateGlobal( sceneRef, camera );
// user callback before the render list is built
sceneRef.onBeforeRender( this, scene, camera, renderTarget );
// frustum culling setup and render list construction
_projScreenMatrix.multiplyMatrices( camera.projectionMatrix, camera.matrixWorldInverse );
_frustum.setFromProjectionMatrix( _projScreenMatrix, coordinateSystem );
const renderList = this._renderLists.get( scene, camera );
renderList.begin();
this._projectObject( scene, camera, 0, renderList, renderContext.clippingContext );
renderList.finish();
if ( this.sortObjects === true ) {
renderList.sort( this._opaqueSort, this._transparentSort );
}
// attach target textures/dimensions; mipmap level shrinks the effective size
if ( renderTarget !== null ) {
this._textures.updateRenderTarget( renderTarget, activeMipmapLevel );
const renderTargetData = this._textures.get( renderTarget );
renderContext.textures = renderTargetData.textures;
renderContext.depthTexture = renderTargetData.depthTexture;
renderContext.width = renderTargetData.width;
renderContext.height = renderTargetData.height;
renderContext.renderTarget = renderTarget;
renderContext.depth = renderTarget.depthBuffer;
renderContext.stencil = renderTarget.stencilBuffer;
} else {
renderContext.textures = null;
renderContext.depthTexture = null;
renderContext.width = this.domElement.width;
renderContext.height = this.domElement.height;
renderContext.depth = this.depth;
renderContext.stencil = this.stencil;
}
renderContext.width >>= activeMipmapLevel;
renderContext.height >>= activeMipmapLevel;
renderContext.activeCubeFace = activeCubeFace;
renderContext.activeMipmapLevel = activeMipmapLevel;
renderContext.occlusionQueryCount = renderList.occlusionQueryCount;
// background may clear or draw into the context
this._background.update( sceneRef, renderList, renderContext );
// begin the backend render pass
this.backend.beginRender( renderContext );
// process render lists: bundles first, then opaque, then transparent
const {
bundles,
lightsNode,
transparentDoublePass: transparentDoublePassObjects,
transparent: transparentObjects,
opaque: opaqueObjects
} = renderList;
if ( bundles.length > 0 ) this._renderBundles( bundles, sceneRef, lightsNode );
if ( this.opaque === true && opaqueObjects.length > 0 ) this._renderObjects( opaqueObjects, camera, sceneRef, lightsNode );
if ( this.transparent === true && transparentObjects.length > 0 ) this._renderTransparents( transparentObjects, transparentDoublePassObjects, camera, sceneRef, lightsNode );
// finish render pass
this.backend.finishRender( renderContext );
// restore render tree state captured above
nodeFrame.renderId = previousRenderId;
this._currentRenderContext = previousRenderContext;
this._currentRenderObjectFunction = previousRenderObjectFunction;
// output pass: blit the framebuffer target to the actual output via a fullscreen quad
if ( frameBufferTarget !== null ) {
this.setRenderTarget( outputRenderTarget, activeCubeFace, activeMipmapLevel );
const quad = this._quad;
if ( this._nodes.hasOutputChange( renderTarget.texture ) ) {
quad.material.fragmentNode = this._nodes.getOutputNode( renderTarget.texture );
quad.material.needsUpdate = true;
}
this._renderScene( quad, quad.camera, false );
}
// user callback after rendering
sceneRef.onAfterRender( this, scene, camera, renderTarget );
//
return renderContext;
}
/**
* Returns the maximum available anisotropy for texture filtering,
* as reported by the active backend.
*
* @return {Number} The maximum available anisotropy.
*/
getMaxAnisotropy() {
return this.backend.getMaxAnisotropy();
}
/**
* Returns the active cube face as set via `setRenderTarget()`.
*
* @return {Number} The active cube face.
*/
getActiveCubeFace() {
return this._activeCubeFace;
}
/**
* Returns the active mipmap level as set via `setRenderTarget()`.
*
* @return {Number} The active mipmap level.
*/
getActiveMipmapLevel() {
return this._activeMipmapLevel;
}
/**
* Applications are advised to always define the animation loop
* with this method and not manually with `requestAnimationFrame()`
* for best compatibility.
*
* Pass `null` to stop a previously set loop.
*
* @async
* @param {Function} callback - The application's animation loop.
* @return {Promise} A Promise that resolves when the set has been executed.
*/
async setAnimationLoop( callback ) {
// the animation manager only exists after init(), so initialize on demand
if ( this._initialized === false ) await this.init();
this._animation.setAnimationLoop( callback );
}
/**
* Can be used to transfer buffer data from a storage buffer attribute
* from the GPU to the CPU in context of compute shaders.
*
* @async
* @param {StorageBufferAttribute} attribute - The storage buffer attribute.
* @return {Promise<ArrayBuffer>} A promise that resolves with the buffer data when the data are ready.
*/
async getArrayBufferAsync( attribute ) {
// `return await` keeps this frame in async stack traces on rejection
return await this.backend.getArrayBufferAsync( attribute );
}
/**
* Returns the rendering context of the active backend.
*
* @return {GPUCanvasContext|WebGL2RenderingContext} The rendering context.
*/
getContext() {
return this.backend.getContext();
}
/**
* Returns the pixel ratio (physical pixels per logical pixel).
*
* @return {Number} The pixel ratio.
*/
getPixelRatio() {
return this._pixelRatio;
}
/**
* Returns the drawing buffer size in physical pixels. This method honors the pixel ratio.
*
* @param {Vector2} target - The method writes the result in this target object.
* @return {Vector2} The drawing buffer size.
*/
getDrawingBufferSize( target ) {
return target.set( this._width * this._pixelRatio, this._height * this._pixelRatio ).floor();
}
/**
* Returns the renderer's size in logical pixels. This method does not honor the pixel ratio.
*
* @param {Vector2} target - The method writes the result in this target object.
* @return {Vector2} The renderer's size in logical pixels.
*/
getSize( target ) {
return target.set( this._width, this._height );
}
/**
* Sets the given pixel ration and resizes the canvas if necessary.
*
* @param {Number} [value=1] - The pixel ratio.
*/
setPixelRatio( value = 1 ) {
if ( this._pixelRatio === value ) return;
this._pixelRatio = value;
this.setSize( this._width, this._height, false );
}
/**
* This method allows to define the drawing buffer size by specifying
* width, height and pixel ratio all at once. The size of the drawing
* buffer is computed with this formula:
* ````
* size.x = width * pixelRatio;
* size.y = height * pixelRatio;
*```
*
* @param {Number} width - The width in logical pixels.
* @param {Number} height - The height in logical pixels.
* @param {Number} pixelRatio - The pixel ratio.
*/
setDrawingBufferSize( width, height, pixelRatio ) {
this._width = width;
this._height = height;
this._pixelRatio = pixelRatio;
this.domElement.width = Math.floor( width * pixelRatio );
this.domElement.height = Math.floor( height * pixelRatio );
this.setViewport( 0, 0, width, height );
if ( this._initialized ) this.backend.updateSize();
}
/**
* Sets the size of the renderer.
*
* @param {Number} width - The width in logical pixels.
* @param {Number} height - The height in logical pixels.
* @param {Boolean} [updateStyle=true] - Whether to update the `style` attribute of the canvas or not.
*/
setSize( width, height, updateStyle = true ) {
this._width = width;
this._height = height;
this.domElement.width = Math.floor( width * this._pixelRatio );
this.domElement.height = Math.floor( height * this._pixelRatio );
if ( updateStyle === true ) {
this.domElement.style.width = width + 'px';
this.domElement.style.height = height + 'px';
}
this.setViewport( 0, 0, width, height );
if ( this._initialized ) this.backend.updateSize();
}
/**
* Defines a manual sort function for the opaque render list.
* Pass `null` to use the default sort.
*
* @param {Function} method - The sort function.
*/
setOpaqueSort( method ) {
this._opaqueSort = method; // consumed by renderList.sort() in _renderScene()
}
/**
* Defines a manual sort function for the transparent render list.
* Pass `null` to use the default sort.
*
* @param {Function} method - The sort function.
*/
setTransparentSort( method ) {
this._transparentSort = method; // consumed by renderList.sort() in _renderScene()
}
/**
* Returns the scissor rectangle.
*
* @param {Vector4} target - The method writes the result in this target object.
* @return {Vector4} The scissor rectangle.
*/
getScissor( target ) {
const scissor = this._scissor;
target.x = scissor.x;
target.y = scissor.y;
target.width = scissor.width;
target.height = scissor.height;
return target;
}
/**
* Defines the scissor rectangle.
*
* @param {Number | Vector4} x - The horizontal coordinate for the lower left corner of the box in logical pixel unit.
* Instead of passing four arguments, the method also works with a single four-dimensional vector.
* @param {Number} y - The vertical coordinate for the lower left corner of the box in logical pixel unit.
* @param {Number} width - The width of the scissor box in logical pixel unit.
* @param {Number} height - The height of the scissor box in logical pixel unit.
*/
setScissor( x, y, width, height ) {
const scissor = this._scissor;
if ( x.isVector4 ) {
scissor.copy( x );
} else {
scissor.set( x, y, width, height );
}
}
/**
* Returns the scissor test value.
*
* @return {Boolean} Whether the scissor test is enabled or not.
*/
getScissorTest() {
return this._scissorTest;
}
/**
* Enables or disables the scissor test.
*
* @param {Boolean} boolean - Whether the scissor test should be enabled or not.
*/
setScissorTest( boolean ) {
this._scissorTest = boolean;
// the backend needs to mirror the flag for its render pass setup
this.backend.setScissorTest( boolean );
}
/**
* Returns the viewport definition.
*
* @param {Vector4} target - The method writes the result in this target object.
* @return {Vector4} The viewport definition.
*/
getViewport( target ) {
return target.copy( this._viewport );
}
/**
* Defines the viewport.
*
* @param {Number | Vector4} x - The horizontal coordinate for the lower left corner of the viewport origin in logical pixel unit.
* @param {Number} y - The vertical coordinate for the lower left corner of the viewport origin in logical pixel unit.
* @param {Number} width - The width of the viewport in logical pixel unit.
* @param {Number} height - The height of the viewport in logical pixel unit.
* @param {Number} minDepth - The minimum depth value of the viewport. WebGPU only.
* @param {Number} maxDepth - The maximum depth value of the viewport. WebGPU only.
*/
setViewport( x, y, width, height, minDepth = 0, maxDepth = 1 ) {
const viewport = this._viewport;
if ( x.isVector4 ) {
viewport.copy( x );
} else {
viewport.set( x, y, width, height );
}
viewport.minDepth = minDepth;
viewport.maxDepth = maxDepth;
}
/**
* Returns the clear color.
*
* @param {Color} target - The method writes the result in this target object.
* @return {Color} The clear color.
*/
getClearColor( target ) {
return target.copy( this._clearColor );
}
/**
* Defines the clear color and optionally the clear alpha.
*
* @param {Color} color - The clear color.
* @param {Number} [alpha=1] - The clear alpha.
*/
setClearColor( color, alpha = 1 ) {
this._clearColor.set( color );
// alpha is stored as an extra channel on the internal clear color
this._clearColor.a = alpha;
}
/**
* Returns the clear alpha (the `a` channel of the internal clear color).
*
* @return {Number} The clear alpha.
*/
getClearAlpha() {
return this._clearColor.a;
}
/**
* Defines the clear alpha (the `a` channel of the internal clear color).
*
* @param {Number} alpha - The clear alpha.
*/
setClearAlpha( alpha ) {
this._clearColor.a = alpha;
}
/**
* Returns the clear depth.
*
* @return {Number} The clear depth.
*/
getClearDepth() {
return this._clearDepth;
}
/**
* Defines the clear depth.
*
* @param {Number} depth - The clear depth.
*/
setClearDepth( depth ) {
this._clearDepth = depth;
}
/**
* Returns the clear stencil.
*
* @return {Number} The clear stencil.
*/
getClearStencil() {
return this._clearStencil;
}
/**
* Defines the clear stencil.
*
* @param {Number} stencil - The clear stencil.
*/
setClearStencil( stencil ) {
this._clearStencil = stencil;
}
/**
* This method performs an occlusion query for the given 3D object.
* It returns `true` if the given 3D object is fully occluded by other
* 3D objects in the scene.
*
* @param {Object3D} object - The 3D object to test.
* @return {Boolean} Whether the 3D object is fully occluded or not.
*/
isOccluded( object ) {
const renderContext = this._currentRenderContext;
return renderContext && this.backend.isOccluded( renderContext, object );
}
/**
* Performs a manual clear operation. This method ignores `autoClear` properties.
*
* @param {Boolean} [color=true] - Whether the color buffer should be cleared or not.
* @param {Boolean} [depth=true] - Whether the depth buffer should be cleared or not.
* @param {Boolean} [stencil=true] - Whether the stencil buffer should be cleared or not.
* @return {Promise} A Promise that resolves when the clear operation has been executed.
* Only returned when the renderer has not been initialized.
*/
clear( color = true, depth = true, stencil = true ) {
if ( this._initialized === false ) {
console.warn( 'THREE.Renderer: .clear() called before the backend is initialized. Try using .clearAsync() instead.' );
return this.clearAsync( color, depth, stencil );
}
const renderTarget = this._renderTarget || this._getFrameBufferTarget();
let renderContext = null;
if ( renderTarget !== null ) {
this._textures.updateRenderTarget( renderTarget );
const renderTargetData = this._textures.get( renderTarget );
renderContext = this._renderContexts.getForClear( renderTarget );
renderContext.textures = renderTargetData.textures;
renderContext.depthTexture = renderTargetData.depthTexture;
renderContext.width = renderTargetData.width;
renderContext.height = renderTargetData.height;
renderContext.renderTarget = renderTarget;
renderContext.depth = renderTarget.depthBuffer;
renderContext.stencil = renderTarget.stencilBuffer;
}
// #30329
renderContext.clearColorValue = this._clearColor;
this.backend.clear( color, depth, stencil, renderContext );
if ( renderTarget !== null && this._renderTarget === null ) {
// If a color space transform or tone mapping is required,
// the clear operation clears the intermediate renderTarget texture, but does not update the screen canvas.
const quad = this._quad;
if ( this._nodes.hasOutputChange( renderTarget.texture ) ) {
quad.material.fragmentNode = this._nodes.getOutputNode( renderTarget.texture );
quad.material.needsUpdate = true;
}
this._renderScene( quad, quad.camera, false );
}
}
/**
* Performs a manual clear operation of the color buffer. This method ignores `autoClear` properties.
*
* @return {Promise} A Promise that resolves when the clear operation has been executed.
* Only returned when the renderer has not been initialized.
*/
clearColor() {
return this.clear( true, false, false );
}
/**
* Performs a manual clear operation of the depth buffer. This method ignores `autoClear` properties.
*
* @return {Promise} A Promise that resolves when the clear operation has been executed.
* Only returned when the renderer has not been initialized.
*/
clearDepth() {
return this.clear( false, true, false );
}
/**
* Performs a manual clear operation of the stencil buffer. This method ignores `autoClear` properties.
*
* @return {Promise} A Promise that resolves when the clear operation has been executed.
* Only returned when the renderer has not been initialized.
*/
clearStencil() {
return this.clear( false, false, true );
}
/**
* Async version of {@link module:Renderer~Renderer#clear}.
*
* @async
* @param {Boolean} [color=true] - Whether the color buffer should be cleared or not.
* @param {Boolean} [depth=true] - Whether the depth buffer should be cleared or not.
* @param {Boolean} [stencil=true] - Whether the stencil buffer should be cleared or not.
* @return {Promise} A Promise that resolves when the clear operation has been executed.
*/
async clearAsync( color = true, depth = true, stencil = true ) {
if ( this._initialized === false ) await this.init();
// after init() the synchronous path is valid
this.clear( color, depth, stencil );
}
/**
* Async version of {@link module:Renderer~Renderer#clearColor}.
*
* @async
* @return {Promise} A Promise that resolves when the clear operation has been executed.
*/
async clearColorAsync() {
this.clearAsync( true, false, false );
}
/**
* Async version of {@link module:Renderer~Renderer#clearDepth}.
*
* @async
* @return {Promise} A Promise that resolves when the clear operation has been executed.
*/
async clearDepthAsync() {
this.clearAsync( false, true, false );
}
/**
* Async version of {@link module:Renderer~Renderer#clearStencil}.
*
* @async
* @return {Promise} A Promise that resolves when the clear operation has been executed.
*/
async clearStencilAsync() {
this.clearAsync( false, false, true );
}
/**
* The current output tone mapping of the renderer. When a render target is set,
* the output tone mapping is always `NoToneMapping`.
*
* @readonly
* @type {Number}
*/
get currentToneMapping() {
// with an active render target the renderer outputs untone-mapped data
return this._renderTarget !== null ? NoToneMapping : this.toneMapping;
}
/**
* The current output color space of the renderer. When a render target is set,
* the output color space is always `LinearSRGBColorSpace`.
*
* @readonly
* @type {String}
*/
get currentColorSpace() {
// with an active render target the renderer outputs linear data
return this._renderTarget !== null ? LinearSRGBColorSpace : this.outputColorSpace;
}
/**
* Frees all internal resources of the renderer. Call this method if the renderer
* is no longer in use by your app.
*/
dispose() {
// dispose bookkeeping and backend resources first
this.info.dispose();
this.backend.dispose();
// then all internal managers
this._animation.dispose();
this._objects.dispose();
this._pipelines.dispose();
this._nodes.dispose();
this._bindings.dispose();
this._renderLists.dispose();
this._renderContexts.dispose();
this._textures.dispose();
// the framebuffer target is created lazily, so it may not exist
if ( this._frameBufferTarget !== null ) this._frameBufferTarget.dispose();
Object.values( this.backend.timestampQueryPool ).forEach( queryPool => {
if ( queryPool !== null ) queryPool.dispose();
} );
// reset render target and stop the animation loop.
// NOTE(review): setAnimationLoop() is async; its promise is deliberately
// not awaited here since dispose() is synchronous — confirm intent.
this.setRenderTarget( null );
this.setAnimationLoop( null );
}
/**
* Sets the given render target. Calling this method means the renderer does not
* target the default framebuffer (meaning the canvas) anymore but a custom framebuffer.
* Use `null` as the first argument to reset the state.
*
* @param {RenderTarget?} renderTarget - The render target to set.
* @param {Number} [activeCubeFace=0] - The active cube face.
* @param {Number} [activeMipmapLevel=0] - The active mipmap level.
*/
setRenderTarget( renderTarget, activeCubeFace = 0, activeMipmapLevel = 0 ) {
// state only; the target takes effect on the next render/clear
this._renderTarget = renderTarget;
this._activeCubeFace = activeCubeFace;
this._activeMipmapLevel = activeMipmapLevel;
}
/**
* Returns the current render target.
*
* @return {RenderTarget?} The render target. Returns `null` if no render target is set.
*/
getRenderTarget() {
return this._renderTarget;
}
/**
* Callback for {@link module:Renderer~Renderer#setRenderObjectFunction}.
*
* @callback renderObjectFunction
* @param {Object3D} object - The 3D object.
* @param {Scene} scene - The scene the 3D object belongs to.
* @param {Camera} camera - The camera the object should be rendered with.
* @param {BufferGeometry} geometry - The object's geometry.
* @param {Material} material - The object's material.
* @param {Object?} group - Only relevant for objects using multiple materials. This represents a group entry from the respective `BufferGeometry`.
* @param {LightsNode} lightsNode - The current lights node.
* @param {ClippingContext} clippingContext - The clipping context.
* @param {String?} [passId=null] - An optional ID for identifying the pass.
*/
/**
* Sets the given render object function. Calling this method overwrites the default implementation
* which is {@link module:Renderer~Renderer#renderObject}. Defining a custom function can be useful
* if you want to modify the way objects are rendered. For example you can define things like "every
* object that has material of a certain type should perform a pre-pass with a special overwrite material".
* The custom function must always call `renderObject()` in its implementation.
*
* Use `null` as the first argument to reset the state.
*
* @param {module:Renderer~renderObjectFunction?} renderObjectFunction - The render object function.
*/
setRenderObjectFunction( renderObjectFunction ) {
this._renderObjectFunction = renderObjectFunction; // picked up by _renderScene()
}
/**
* Returns the current render object function.
*
* @return {Function?} The current render object function. Returns `null` if no function is set.
*/
getRenderObjectFunction() {
return this._renderObjectFunction;
}
/**
* Execute a single or an array of compute nodes. This method can only be called
* if the renderer has been initialized.
*
* @param {Node|Array<Node>} computeNodes - The compute node(s).
* @return {Promise?} A Promise that resolve when the compute has finished. Only returned when the renderer has not been initialized.
* @throws {Error} If the (first) given node is not a ComputeNode.
*/
compute( computeNodes ) {
if ( this._isDeviceLost === true ) return;
if ( this._initialized === false ) {
console.warn( 'THREE.Renderer: .compute() called before the backend is initialized. Try using .computeAsync() instead.' );
return this.computeAsync( computeNodes );
}
// preserve the node frame's render id across this compute dispatch
const nodeFrame = this._nodes.nodeFrame;
const previousRenderId = nodeFrame.renderId;
// bookkeeping; the global call counter doubles as the node frame's render id
this.info.calls ++;
this.info.compute.calls ++;
this.info.compute.frameCalls ++;
nodeFrame.renderId = this.info.calls;
// normalize input to an array and validate the first entry
const backend = this.backend;
const pipelines = this._pipelines;
const bindings = this._bindings;
const nodes = this._nodes;
const computeList = Array.isArray( computeNodes ) ? computeNodes : [ computeNodes ];
if ( computeList[ 0 ] === undefined || computeList[ 0 ].isComputeNode !== true ) {
throw new Error( 'THREE.Renderer: .compute() expects a ComputeNode.' );
}
backend.beginCompute( computeNodes );
for ( const computeNode of computeList ) {
// first use of this node: register cleanup and run the onInit callback
if ( pipelines.has( computeNode ) === false ) {
const dispose = () => {
// drop all cached state when the node is disposed
computeNode.removeEventListener( 'dispose', dispose );
pipelines.delete( computeNode );
bindings.delete( computeNode );
nodes.delete( computeNode );
};
computeNode.addEventListener( 'dispose', dispose );
// user-defined one-time initialization hook
const onInitFn = computeNode.onInitFunction;
if ( onInitFn !== null ) {
onInitFn.call( computeNode, { renderer: this } );
}
}
// refresh node/binding state, then dispatch via the backend
nodes.updateForCompute( computeNode );
bindings.updateForCompute( computeNode );
const computeBindings = bindings.getForCompute( computeNode );
const computePipeline = pipelines.getForCompute( computeNode, computeBindings );
backend.compute( computeNodes, computeNode, computeBindings, computePipeline );
}
backend.finishCompute( computeNodes );
// restore the render id captured above
nodeFrame.renderId = previousRenderId;
}
/**
* Execute a single or an array of compute nodes.
*
* @async
* @param {Node|Array<Node>} computeNodes - The compute node(s).
* @return {Promise} A Promise that resolve when the compute has finished.
*/
async computeAsync( computeNodes ) {
if ( this._initialized === false ) await this.init();
this.compute( computeNodes );
}
/**
* Checks if the given feature is supported by the selected backend.
*
* @async
* @param {String} name - The feature's name.
* @return {Promise<Boolean>} A Promise that resolves with a bool that indicates whether the feature is supported or not.
*/
async hasFeatureAsync( name ) {
if ( this._initialized === false ) await this.init();
return this.backend.hasFeature( name );
}
/**
* Resolves the pending time stamp queries of the given type, initializing the
* backend first when necessary.
*
* NOTE(review): the resolution strategy is backend-specific — presumably this
* reads back GPU timestamp queries recorded for the given pass type (the
* default is 'render'); verify against the backend implementation.
*
* @async
* @param {String} [type='render'] - The pass type whose time stamps should be resolved.
* @return {Promise} A Promise that resolves with the backend's result.
*/
async resolveTimestampsAsync( type = 'render' ) {
if ( this._initialized === false ) await this.init();
return this.backend.resolveTimestampsAsync( type );
}
/**
* Checks if the given feature is supported by the selected backend. If the
* renderer has not been initialized, this method always returns `false`.
*
* @param {String} name - The feature's name.
* @return {Boolean} Whether the feature is supported or not.
*/
hasFeature( name ) {
if ( this._initialized === false ) {
console.warn( 'THREE.Renderer: .hasFeature() called before the backend is initialized. Try using .hasFeatureAsync() instead.' );
return false;
}
return this.backend.hasFeature( name );
}
/**
* Returns `true` when the renderer has been initialized.
*
* @return {Boolean} Whether the renderer has been initialized or not.
*/
hasInitialized() {
return this._initialized;
}
/**
* Initializes the given textures. Useful for preloading a texture rather than waiting until first render
* (which can cause noticeable lags due to decode and GPU upload overhead).
*
* @async
* @param {Texture} texture - The texture.
* @return {Promise} A Promise that resolves when the texture has been initialized.
*/
async initTextureAsync( texture ) {
if ( this._initialized === false ) await this.init();
this._textures.updateTexture( texture );
}
/**
* Initializes the given textures. Useful for preloading a texture rather than waiting until first render
* (which can cause noticeable lags due to decode and GPU upload overhead).
*
* This method can only be used if the renderer has been initialized.
*
* @param {Texture} texture - The texture.
*/
initTexture( texture ) {
if ( this._initialized === false ) {
console.warn( 'THREE.Renderer: .initTexture() called before the backend is initialized. Try using .initTextureAsync() instead.' );
}
this._textures.updateTexture( texture );
}
/**
* Copies the current bound framebuffer into the given texture.
*
* @param {FramebufferTexture} framebufferTexture - The destination texture.
* @param {Vector2|Vector4?} [rectangle=null] - A two or four dimensional vector that defines the rectangular portion of the framebuffer that should be copied. When `null`, the full texture size is used.
*/
copyFramebufferToTexture( framebufferTexture, rectangle = null ) {
// normalize the rectangle into the shared _vector4 (x, y, width, height)
if ( rectangle !== null ) {
if ( rectangle.isVector2 ) {
// a Vector2 only provides the origin; width/height are taken from the texture
rectangle = _vector4.set( rectangle.x, rectangle.y, framebufferTexture.image.width, framebufferTexture.image.height ).floor();
} else if ( rectangle.isVector4 ) {
rectangle = _vector4.copy( rectangle ).floor();
} else {
console.error( 'THREE.Renderer.copyFramebufferToTexture: Invalid rectangle.' );
return;
}
} else {
rectangle = _vector4.set( 0, 0, framebufferTexture.image.width, framebufferTexture.image.height );
}
// determine the render context representing the currently bound framebuffer
let renderContext = this._currentRenderContext;
let renderTarget;
if ( renderContext !== null ) {
renderTarget = renderContext.renderTarget;
} else {
// outside of an active render pass, fall back to the explicit render target
// or the renderer's internal framebuffer target
renderTarget = this._renderTarget || this._getFrameBufferTarget();
if ( renderTarget !== null ) {
this._textures.updateRenderTarget( renderTarget );
renderContext = this._textures.get( renderTarget );
}
}
// make sure the destination texture exists on the GPU, then delegate the copy
this._textures.updateTexture( framebufferTexture, { renderTarget } );
this.backend.copyFramebufferToTexture( framebufferTexture, renderContext, rectangle );
}
/**
* Copies data of the given source texture into a destination texture.
*
* Both textures are created/updated on the backend before the copy is issued.
*
* @param {Texture} srcTexture - The source texture.
* @param {Texture} dstTexture - The destination texture.
* @param {Box2|Box3?} [srcRegion=null] - A bounding box which describes the source region. Can be two or three-dimensional.
* @param {Vector2|Vector3?} [dstPosition=null] - A vector that represents the origin of the destination region. Can be two or three-dimensional.
* @param {Number} [level=0] - The mipmap level to copy.
*/
copyTextureToTexture( srcTexture, dstTexture, srcRegion = null, dstPosition = null, level = 0 ) {
this._textures.updateTexture( srcTexture );
this._textures.updateTexture( dstTexture );
this.backend.copyTextureToTexture( srcTexture, dstTexture, srcRegion, dstPosition, level );
}
/**
* Reads pixel data from the given render target.
*
* NOTE(review): unlike the other async methods, this one does not await
* renderer initialization — presumably the render target has already been
* rendered to at this point; verify with callers.
*
* @async
* @param {RenderTarget} renderTarget - The render target to read from.
* @param {Number} x - The `x` coordinate of the copy region's origin.
* @param {Number} y - The `y` coordinate of the copy region's origin.
* @param {Number} width - The width of the copy region.
* @param {Number} height - The height of the copy region.
* @param {Number} [textureIndex=0] - The texture index of a MRT render target.
* @param {Number} [faceIndex=0] - The active cube face index.
* @return {Promise<TypedArray>} A Promise that resolves when the read has been finished. The resolve provides the read data as a typed array.
*/
async readRenderTargetPixelsAsync( renderTarget, x, y, width, height, textureIndex = 0, faceIndex = 0 ) {
return this.backend.copyTextureToBuffer( renderTarget.textures[ textureIndex ], x, y, width, height, faceIndex );
}
/**
* Analyzes the given 3D object's hierarchy and builds render lists from the
* processed hierarchy.
*
* @param {Object3D} object - The 3D object to process (usually a scene).
* @param {Camera} camera - The camera the object is rendered with.
* @param {Number} groupOrder - The group order is derived from the `renderOrder` of groups and is used to group 3D objects within groups.
* @param {RenderList} renderList - The current render list.
* @param {ClippingContext} clippingContext - The current clipping context.
*/
_projectObject( object, camera, groupOrder, renderList, clippingContext ) {
// invisible objects prune their entire subtree
if ( object.visible === false ) return;
const visible = object.layers.test( camera.layers );
if ( visible ) {
if ( object.isGroup ) {
// groups propagate their renderOrder to all descendants via the recursion below
groupOrder = object.renderOrder;
if ( object.isClippingGroup && object.enabled ) clippingContext = clippingContext.getGroupContext( object );
} else if ( object.isLOD ) {
if ( object.autoUpdate === true ) object.update( camera );
} else if ( object.isLight ) {
renderList.pushLight( object );
} else if ( object.isSprite ) {
if ( ! object.frustumCulled || _frustum.intersectsSprite( object ) ) {
if ( this.sortObjects === true ) {
// project the sprite position to clip space; _vector4.z serves as the sort key below
_vector4.setFromMatrixPosition( object.matrixWorld ).applyMatrix4( _projScreenMatrix );
}
const { geometry, material } = object;
if ( material.visible ) {
renderList.push( object, geometry, material, groupOrder, _vector4.z, null, clippingContext );
}
}
} else if ( object.isLineLoop ) {
console.error( 'THREE.Renderer: Objects of type THREE.LineLoop are not supported. Please use THREE.Line or THREE.LineSegments.' );
} else if ( object.isMesh || object.isLine || object.isPoints ) {
if ( ! object.frustumCulled || _frustum.intersectsObject( object ) ) {
const { geometry, material } = object;
if ( this.sortObjects === true ) {
// project the bounding sphere center to clip space for depth sorting
if ( geometry.boundingSphere === null ) geometry.computeBoundingSphere();
_vector4
.copy( geometry.boundingSphere.center )
.applyMatrix4( object.matrixWorld )
.applyMatrix4( _projScreenMatrix );
}
if ( Array.isArray( material ) ) {
// multi-material object: push one render item per visible geometry group
const groups = geometry.groups;
for ( let i = 0, l = groups.length; i < l; i ++ ) {
const group = groups[ i ];
const groupMaterial = material[ group.materialIndex ];
if ( groupMaterial && groupMaterial.visible ) {
renderList.push( object, geometry, groupMaterial, groupOrder, _vector4.z, group, clippingContext );
}
}
} else if ( material.visible ) {
renderList.push( object, geometry, material, groupOrder, _vector4.z, null, clippingContext );
}
}
}
}
// bundle groups collect their subtree into a dedicated render list that the
// backend can record once and replay (only when the backend supports bundles)
if ( object.isBundleGroup === true && this.backend.beginBundle !== undefined ) {
const baseRenderList = renderList;
// replace render list so the children processed below land in the bundle's list
renderList = this._renderLists.get( object, camera );
renderList.begin();
baseRenderList.pushBundle( {
bundleGroup: object,
camera,
renderList,
} );
renderList.finish();
}
// recurse with the (possibly updated) group order, clipping context and render list
const children = object.children;
for ( let i = 0, l = children.length; i < l; i ++ ) {
this._projectObject( children[ i ], camera, groupOrder, renderList, clippingContext );
}
}
/**
* Renders the given render bundles.
*
* @private
* @param {Array<Object>} bundles - Array with render bundle data.
* @param {Scene} sceneRef - The scene the render bundles belong to.
* @param {LightsNode} lightsNode - The current lights node.
*/
_renderBundles( bundles, sceneRef, lightsNode ) {
for ( const bundle of bundles ) {
this._renderBundle( bundle, sceneRef, lightsNode );
}
}
/**
* Renders the transparent objects from the given render lists.
*
* @private
* @param {Array<Object>} renderList - The transparent render list.
* @param {Array<Object>} doublePassList - The list of transparent objects which require a double pass (e.g. because of transmission).
* @param {Camera} camera - The camera the render list should be rendered with.
* @param {Scene} scene - The scene the render list belongs to.
* @param {LightsNode} lightsNode - The current lights node.
*/
_renderTransparents( renderList, doublePassList, camera, scene, lightsNode ) {
if ( doublePassList.length > 0 ) {
// render back side
for ( const { material } of doublePassList ) {
material.side = BackSide;
}
this._renderObjects( doublePassList, camera, scene, lightsNode, 'backSide' );
// render front side
for ( const { material } of doublePassList ) {
material.side = FrontSide;
}
this._renderObjects( renderList, camera, scene, lightsNode );
// restore
for ( const { material } of doublePassList ) {
material.side = DoubleSide;
}
} else {
this._renderObjects( renderList, camera, scene, lightsNode );
}
}
/**
* Renders the objects from the given render list.
*
* @private
* @param {Array<Object>} renderList - The render list.
* @param {Camera} camera - The camera the render list should be rendered with.
* @param {Scene} scene - The scene the render list belongs to.
* @param {LightsNode} lightsNode - The current lights node.
* @param {String?} [passId=null] - An optional ID for identifying the pass.
*/
_renderObjects( renderList, camera, scene, lightsNode, passId = null ) {
for ( let i = 0, il = renderList.length; i < il; i ++ ) {
const { object, geometry, material, group, clippingContext } = renderList[ i ];
this._currentRenderObjectFunction( object, scene, camera, geometry, material, group, lightsNode, clippingContext, passId );
}
}
/**
* This method represents the default render object function that manages the render lifecycle
* of the object.
*
* @param {Object3D} object - The 3D object.
* @param {Scene} scene - The scene the 3D object belongs to.
* @param {Camera} camera - The camera the object should be rendered with.
* @param {BufferGeometry} geometry - The object's geometry.
* @param {Material} material - The object's material.
* @param {Object?} group - Only relevant for objects using multiple materials. This represents a group entry from the respective `BufferGeometry`.
* @param {LightsNode} lightsNode - The current lights node.
* @param {ClippingContext?} [clippingContext=null] - The clipping context.
* @param {String?} [passId=null] - An optional ID for identifying the pass.
*/
renderObject( object, scene, camera, geometry, material, group, lightsNode, clippingContext = null, passId = null ) {
// the override material's original nodes are stashed here so they can be restored after the draw
let overridePositionNode;
let overrideColorNode;
let overrideDepthNode;
//
object.onBeforeRender( this, scene, camera, geometry, material, group );
// when an override material is active, selected node properties of the
// object's own material are transferred to it so per-object effects
// (displacement, alpha, shadow nodes) survive the override
if ( scene.overrideMaterial !== null ) {
const overrideMaterial = scene.overrideMaterial;
if ( material.positionNode && material.positionNode.isNode ) {
overridePositionNode = overrideMaterial.positionNode;
overrideMaterial.positionNode = material.positionNode;
}
overrideMaterial.alphaTest = material.alphaTest;
overrideMaterial.alphaMap = material.alphaMap;
overrideMaterial.transparent = material.transparent || material.transmission > 0;
if ( overrideMaterial.isShadowPassMaterial ) {
overrideMaterial.side = material.shadowSide === null ? material.side : material.shadowSide;
if ( material.depthNode && material.depthNode.isNode ) {
overrideDepthNode = overrideMaterial.depthNode;
overrideMaterial.depthNode = material.depthNode;
}
if ( material.castShadowNode && material.castShadowNode.isNode ) {
overrideColorNode = overrideMaterial.colorNode;
overrideMaterial.colorNode = material.castShadowNode;
}
}
material = overrideMaterial;
}
// transparent double-sided materials render in two passes (back faces first)
// unless a single pass is explicitly enforced
if ( material.transparent === true && material.side === DoubleSide && material.forceSinglePass === false ) {
material.side = BackSide;
this._handleObjectFunction( object, material, scene, camera, lightsNode, group, clippingContext, 'backSide' ); // create backSide pass id
material.side = FrontSide;
this._handleObjectFunction( object, material, scene, camera, lightsNode, group, clippingContext, passId ); // use default pass id
material.side = DoubleSide;
} else {
this._handleObjectFunction( object, material, scene, camera, lightsNode, group, clippingContext, passId );
}
// restore the override material's original nodes (undefined means the slot was never swapped)
if ( overridePositionNode !== undefined ) {
scene.overrideMaterial.positionNode = overridePositionNode;
}
if ( overrideDepthNode !== undefined ) {
scene.overrideMaterial.depthNode = overrideDepthNode;
}
if ( overrideColorNode !== undefined ) {
scene.overrideMaterial.colorNode = overrideColorNode;
}
//
object.onAfterRender( this, scene, camera, geometry, material, group );
}
/**
* This method represents the default `_handleObjectFunction` implementation which creates
* a render object from the given data and performs the draw command with the selected backend.
*
* @private
* @param {Object3D} object - The 3D object.
* @param {Material} material - The object's material.
* @param {Scene} scene - The scene the 3D object belongs to.
* @param {Camera} camera - The camera the object should be rendered with.
* @param {LightsNode} lightsNode - The current lights node.
* @param {{start: Number, count: Number}?} group - Only relevant for objects using multiple materials. This represents a group entry from the respective `BufferGeometry`.
* @param {ClippingContext} clippingContext - The clipping context.
* @param {String?} [passId=null] - An optional ID for identifying the pass.
*/
_renderObjectDirect( object, material, scene, camera, lightsNode, group, clippingContext, passId ) {
const renderObject = this._objects.get( object, material, scene, camera, lightsNode, this._currentRenderContext, clippingContext, passId );
renderObject.drawRange = object.geometry.drawRange;
renderObject.group = group;
// only refresh node, geometry and binding state when the node system reports a change
const needsRefresh = this._nodes.needsRefresh( renderObject );
if ( needsRefresh ) {
this._nodes.updateBefore( renderObject );
this._geometries.updateForRender( renderObject );
this._nodes.updateForRender( renderObject );
this._bindings.updateForRender( renderObject );
}
this._pipelines.updateForRender( renderObject );
// while a render bundle is being recorded, register the render object so the bundle can be replayed later
if ( this._currentRenderBundle !== null ) {
const renderBundleData = this.backend.get( this._currentRenderBundle );
renderBundleData.renderObjects.push( renderObject );
renderObject.bundle = this._currentRenderBundle.bundleGroup;
}
this.backend.draw( renderObject, this.info );
if ( needsRefresh ) this._nodes.updateAfter( renderObject );
}
/**
* A different implementation for `_handleObjectFunction` which only makes sure the object is ready for rendering.
* Used in `compileAsync()`. Unlike `_renderObjectDirect()`, it always refreshes
* state and never issues a draw call — it only requests the render pipeline.
*
* @private
* @param {Object3D} object - The 3D object.
* @param {Material} material - The object's material.
* @param {Scene} scene - The scene the 3D object belongs to.
* @param {Camera} camera - The camera the object should be rendered with.
* @param {LightsNode} lightsNode - The current lights node.
* @param {{start: Number, count: Number}?} group - Only relevant for objects using multiple materials. This represents a group entry from the respective `BufferGeometry`.
* @param {ClippingContext} clippingContext - The clipping context.
* @param {String?} [passId=null] - An optional ID for identifying the pass.
*/
_createObjectPipeline( object, material, scene, camera, lightsNode, group, clippingContext, passId ) {
const renderObject = this._objects.get( object, material, scene, camera, lightsNode, this._currentRenderContext, clippingContext, passId );
renderObject.drawRange = object.geometry.drawRange;
renderObject.group = group;
// bring node, geometry and binding state up to date, then request (and thereby
// compile) the pipeline; compilation promises are collected for compileAsync()
this._nodes.updateBefore( renderObject );
this._geometries.updateForRender( renderObject );
this._nodes.updateForRender( renderObject );
this._bindings.updateForRender( renderObject );
this._pipelines.getForRender( renderObject, this._compilationPromises );
this._nodes.updateAfter( renderObject );
}
/**
* Alias for `compileAsync()`.
*
* @method
* @param {Object3D} scene - The scene or 3D object to precompile.
* @param {Camera} camera - The camera that is used to render the scene.
* @param {Scene} [targetScene] - If the first argument is a 3D object, this parameter must represent the scene the 3D object is going to be added to.
* @return {Promise} A Promise that resolves when the compile has been finished.
*/
get compile() {
return this.compileAsync;
}
}
/**
* A binding represents the connection between a resource (like a texture,
* sampler or uniform buffer) and the resource definition in a shader stage.
*
* This module is an abstract base class for all concrete binding types.
*
* @abstract
* @private
*/
class Binding {

	/**
	 * Constructs a new binding.
	 *
	 * @param {String} [name=''] - The binding's name.
	 */
	constructor( name = '' ) {

		/**
		 * The binding's name.
		 *
		 * @type {String}
		 */
		this.name = name;

		/**
		 * Bitmask of the shader stages in which this binding's
		 * resource is accessible. Starts out empty.
		 *
		 * @type {Number}
		 */
		this.visibility = 0;

	}

	/**
	 * Adds the given shader stage(s) to this binding's visibility bitmask.
	 *
	 * @param {Number} visibility - The shader stage.
	 */
	setVisibility( visibility ) {

		this.visibility = this.visibility | visibility;

	}

	/**
	 * Creates a shallow copy of this binding.
	 *
	 * @return {Binding} The cloned binding.
	 */
	clone() {

		const copy = new this.constructor();

		return Object.assign( copy, this );

	}

}
/** @module BufferUtils **/
/**
* This function is usually called with the length in bytes of an array buffer.
* It returns a padded value which ensures chunk size alignment according to the STD140 layout.
*
* @function
* @param {Number} floatLength - The unpadded buffer length in bytes.
* @return {Number} The padded length, a multiple of `GPU_CHUNK_BYTES`.
*/
function getFloatLength( floatLength ) {

	// round up to the next multiple of the chunk size (STD140 layout)
	return Math.ceil( floatLength / GPU_CHUNK_BYTES ) * GPU_CHUNK_BYTES;

}
/**
* Abstract base class for buffer-style binding types. Wraps a typed array
* and reports its byte length with STD140 chunk alignment applied.
*
* @private
* @abstract
* @augments Binding
*/
class Buffer extends Binding {

	/**
	 * Constructs a new buffer binding.
	 *
	 * @param {String} name - The buffer's name.
	 * @param {TypedArray} [buffer=null] - The backing typed array.
	 */
	constructor( name, buffer = null ) {

		super( name );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isBuffer = true;

		/**
		 * The bytes per element.
		 *
		 * @type {Number}
		 */
		this.bytesPerElement = Float32Array.BYTES_PER_ELEMENT;

		/**
		 * The backing typed array.
		 *
		 * @private
		 * @type {TypedArray}
		 */
		this._buffer = buffer;

	}

	/**
	 * The buffer's byte length, padded for STD140 chunk alignment.
	 *
	 * @type {Number}
	 * @readonly
	 */
	get byteLength() {

		return getFloatLength( this._buffer.byteLength );

	}

	/**
	 * A reference to the internal buffer.
	 *
	 * @type {Float32Array}
	 * @readonly
	 */
	get buffer() {

		return this._buffer;

	}

	/**
	 * Updates the binding. Plain buffers are considered dirty on every call,
	 * so they are always re-uploaded to the GPU.
	 *
	 * @return {Boolean} Always `true`.
	 */
	update() {

		return true;

	}

}
/**
* Represents a uniform buffer binding type. Behaves like {@link Buffer} and
* only adds a type-testing flag.
*
* @private
* @augments Buffer
*/
class UniformBuffer extends Buffer {

	/**
	 * Constructs a new uniform buffer.
	 *
	 * @param {String} name - The buffer's name.
	 * @param {TypedArray} [buffer=null] - The backing typed array.
	 */
	constructor( name, buffer = null ) {

		super( name, buffer );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isUniformBuffer = true;

	}

}
let _id$4 = 0;
/**
* A special form of uniform buffer binding whose backing buffer
* is supplied and managed by a node object.
*
* @private
* @augments UniformBuffer
*/
class NodeUniformBuffer extends UniformBuffer {

	/**
	 * Constructs a new node-based uniform buffer.
	 *
	 * @param {BufferNode} nodeUniform - The uniform buffer node.
	 * @param {UniformGroupNode} groupNode - The uniform group node.
	 */
	constructor( nodeUniform, groupNode ) {

		super( `UniformBuffer_${ _id$4 ++ }`, nodeUniform ? nodeUniform.value : null );

		/**
		 * The uniform buffer node.
		 *
		 * @type {BufferNode}
		 */
		this.nodeUniform = nodeUniform;

		/**
		 * The uniform group node.
		 *
		 * @type {UniformGroupNode}
		 */
		this.groupNode = groupNode;

	}

	/**
	 * The uniform buffer. Always read live from the node so external
	 * updates of the node's value are picked up.
	 *
	 * @type {Float32Array}
	 */
	get buffer() {

		return this.nodeUniform.value;

	}

}
/**
* This class represents a uniform buffer binding but with
* an API that allows to maintain individual uniform objects.
*
* @private
* @augments UniformBuffer
*/
class UniformsGroup extends UniformBuffer {

	/**
	 * Constructs a new uniforms group.
	 *
	 * @param {String} name - The group's name.
	 */
	constructor( name ) {

		super( name );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isUniformsGroup = true;

		/**
		 * A plain-array mirror of the buffer contents, used for cheap
		 * change detection in the update methods below.
		 *
		 * @private
		 * @type {Array<Number>?}
		 * @default null
		 */
		this._values = null;

		/**
		 * An array of uniform objects.
		 *
		 * The order of uniforms in this array must match the order of uniforms in the shader.
		 *
		 * @type {Array<Uniform>}
		 */
		this.uniforms = [];

	}

	/**
	 * Adds a uniform to this group.
	 *
	 * @param {Uniform} uniform - The uniform to add.
	 * @return {UniformsGroup} A reference to this group.
	 */
	addUniform( uniform ) {

		this.uniforms.push( uniform );

		return this;

	}

	/**
	 * Removes a uniform from this group. Does nothing when the uniform
	 * is not part of the group.
	 *
	 * @param {Uniform} uniform - The uniform to remove.
	 * @return {UniformsGroup} A reference to this group.
	 */
	removeUniform( uniform ) {

		const index = this.uniforms.indexOf( uniform );

		if ( index !== - 1 ) {

			this.uniforms.splice( index, 1 );

		}

		return this;

	}

	/**
	 * An array with the raw uniform values, lazily initialized
	 * from the buffer contents.
	 *
	 * @type {Array<Number>}
	 */
	get values() {

		if ( this._values === null ) {

			this._values = Array.from( this.buffer );

		}

		return this._values;

	}

	/**
	 * A Float32 array buffer with the uniform values, lazily allocated
	 * with the aligned byte length.
	 *
	 * @type {Float32Array}
	 */
	get buffer() {

		let buffer = this._buffer;

		if ( buffer === null ) {

			const byteLength = this.byteLength;

			buffer = new Float32Array( new ArrayBuffer( byteLength ) );

			this._buffer = buffer;

		}

		return buffer;

	}

	/**
	 * The byte length of the buffer with correct buffer alignment.
	 * As a side effect, assigns each uniform its element offset.
	 *
	 * @type {Number}
	 */
	get byteLength() {

		let offset = 0; // global buffer offset in bytes

		for ( let i = 0, l = this.uniforms.length; i < l; i ++ ) {

			const uniform = this.uniforms[ i ];

			const { boundary, itemSize } = uniform;

			// offset within a single chunk in bytes
			const chunkOffset = offset % GPU_CHUNK_BYTES;
			const remainingSizeInChunk = GPU_CHUNK_BYTES - chunkOffset;

			// conformance tests
			if ( chunkOffset !== 0 && ( remainingSizeInChunk - boundary ) < 0 ) {

				// check for chunk overflow
				offset += ( GPU_CHUNK_BYTES - chunkOffset );

			} else if ( chunkOffset % boundary !== 0 ) {

				// check for correct alignment
				// NOTE(review): this advance relies on offsets being multiples of 4 bytes
				// and boundaries of 4/8/16 — verify if other item sizes are introduced
				offset += ( chunkOffset % boundary );

			}

			uniform.offset = ( offset / this.bytesPerElement );

			offset += ( itemSize * this.bytesPerElement );

		}

		return Math.ceil( offset / GPU_CHUNK_BYTES ) * GPU_CHUNK_BYTES;

	}

	/**
	 * Updates this group by updating each uniform object of
	 * the internal uniform list. The uniform objects check if their
	 * values has actually changed so this method only returns
	 * `true` if there is a real value change.
	 *
	 * @return {Boolean} Whether the uniforms have been updated and
	 * must be uploaded to the GPU.
	 */
	update() {

		let updated = false;

		for ( const uniform of this.uniforms ) {

			if ( this.updateByType( uniform ) === true ) {

				updated = true;

			}

		}

		return updated;

	}

	/**
	 * Updates a given uniform by calling an update method matching
	 * the uniforms type.
	 *
	 * @param {Uniform} uniform - The uniform to update.
	 * @return {Boolean} Whether the uniform has been updated or not.
	 */
	updateByType( uniform ) {

		if ( uniform.isNumberUniform ) return this.updateNumber( uniform );
		if ( uniform.isVector2Uniform ) return this.updateVector2( uniform );
		if ( uniform.isVector3Uniform ) return this.updateVector3( uniform );
		if ( uniform.isVector4Uniform ) return this.updateVector4( uniform );
		if ( uniform.isColorUniform ) return this.updateColor( uniform );
		if ( uniform.isMatrix3Uniform ) return this.updateMatrix3( uniform );
		if ( uniform.isMatrix4Uniform ) return this.updateMatrix4( uniform );

		console.error( 'THREE.WebGPUUniformsGroup: Unsupported uniform type.', uniform );

	}

	/**
	 * Updates a given Number uniform.
	 *
	 * @param {NumberUniform} uniform - The Number uniform.
	 * @return {Boolean} Whether the uniform has been updated or not.
	 */
	updateNumber( uniform ) {

		let updated = false;

		const a = this.values;
		const v = uniform.getValue();
		const offset = uniform.offset;
		const type = uniform.getType();

		if ( a[ offset ] !== v ) {

			const b = this._getBufferForType( type );

			b[ offset ] = a[ offset ] = v;
			updated = true;

		}

		return updated;

	}

	/**
	 * Updates a given Vector2 uniform.
	 *
	 * @param {Vector2Uniform} uniform - The Vector2 uniform.
	 * @return {Boolean} Whether the uniform has been updated or not.
	 */
	updateVector2( uniform ) {

		let updated = false;

		const a = this.values;
		const v = uniform.getValue();
		const offset = uniform.offset;
		const type = uniform.getType();

		if ( a[ offset + 0 ] !== v.x || a[ offset + 1 ] !== v.y ) {

			const b = this._getBufferForType( type );

			b[ offset + 0 ] = a[ offset + 0 ] = v.x;
			b[ offset + 1 ] = a[ offset + 1 ] = v.y;

			updated = true;

		}

		return updated;

	}

	/**
	 * Updates a given Vector3 uniform.
	 *
	 * @param {Vector3Uniform} uniform - The Vector3 uniform.
	 * @return {Boolean} Whether the uniform has been updated or not.
	 */
	updateVector3( uniform ) {

		let updated = false;

		const a = this.values;
		const v = uniform.getValue();
		const offset = uniform.offset;
		const type = uniform.getType();

		if ( a[ offset + 0 ] !== v.x || a[ offset + 1 ] !== v.y || a[ offset + 2 ] !== v.z ) {

			const b = this._getBufferForType( type );

			b[ offset + 0 ] = a[ offset + 0 ] = v.x;
			b[ offset + 1 ] = a[ offset + 1 ] = v.y;
			b[ offset + 2 ] = a[ offset + 2 ] = v.z;

			updated = true;

		}

		return updated;

	}

	/**
	 * Updates a given Vector4 uniform.
	 *
	 * @param {Vector4Uniform} uniform - The Vector4 uniform.
	 * @return {Boolean} Whether the uniform has been updated or not.
	 */
	updateVector4( uniform ) {

		let updated = false;

		const a = this.values;
		const v = uniform.getValue();
		const offset = uniform.offset;
		const type = uniform.getType();

		// fix: the `w` component lives at `offset + 3`; comparing against
		// `offset + 4` (previous typo) read the next uniform's slot and
		// missed changes that only affected `w`
		if ( a[ offset + 0 ] !== v.x || a[ offset + 1 ] !== v.y || a[ offset + 2 ] !== v.z || a[ offset + 3 ] !== v.w ) {

			const b = this._getBufferForType( type );

			b[ offset + 0 ] = a[ offset + 0 ] = v.x;
			b[ offset + 1 ] = a[ offset + 1 ] = v.y;
			b[ offset + 2 ] = a[ offset + 2 ] = v.z;
			b[ offset + 3 ] = a[ offset + 3 ] = v.w;

			updated = true;

		}

		return updated;

	}

	/**
	 * Updates a given Color uniform. Colors are always float data, so the
	 * plain Float32 buffer is written directly.
	 *
	 * @param {ColorUniform} uniform - The Color uniform.
	 * @return {Boolean} Whether the uniform has been updated or not.
	 */
	updateColor( uniform ) {

		let updated = false;

		const a = this.values;
		const c = uniform.getValue();
		const offset = uniform.offset;

		if ( a[ offset + 0 ] !== c.r || a[ offset + 1 ] !== c.g || a[ offset + 2 ] !== c.b ) {

			const b = this.buffer;

			b[ offset + 0 ] = a[ offset + 0 ] = c.r;
			b[ offset + 1 ] = a[ offset + 1 ] = c.g;
			b[ offset + 2 ] = a[ offset + 2 ] = c.b;

			updated = true;

		}

		return updated;

	}

	/**
	 * Updates a given Matrix3 uniform. Each column occupies a padded
	 * 4-element slot (STD140), hence the gaps at offsets 3, 7 and 11.
	 *
	 * @param {Matrix3Uniform} uniform - The Matrix3 uniform.
	 * @return {Boolean} Whether the uniform has been updated or not.
	 */
	updateMatrix3( uniform ) {

		let updated = false;

		const a = this.values;
		const e = uniform.getValue().elements;
		const offset = uniform.offset;

		if ( a[ offset + 0 ] !== e[ 0 ] || a[ offset + 1 ] !== e[ 1 ] || a[ offset + 2 ] !== e[ 2 ] ||
			a[ offset + 4 ] !== e[ 3 ] || a[ offset + 5 ] !== e[ 4 ] || a[ offset + 6 ] !== e[ 5 ] ||
			a[ offset + 8 ] !== e[ 6 ] || a[ offset + 9 ] !== e[ 7 ] || a[ offset + 10 ] !== e[ 8 ] ) {

			const b = this.buffer;

			b[ offset + 0 ] = a[ offset + 0 ] = e[ 0 ];
			b[ offset + 1 ] = a[ offset + 1 ] = e[ 1 ];
			b[ offset + 2 ] = a[ offset + 2 ] = e[ 2 ];
			b[ offset + 4 ] = a[ offset + 4 ] = e[ 3 ];
			b[ offset + 5 ] = a[ offset + 5 ] = e[ 4 ];
			b[ offset + 6 ] = a[ offset + 6 ] = e[ 5 ];
			b[ offset + 8 ] = a[ offset + 8 ] = e[ 6 ];
			b[ offset + 9 ] = a[ offset + 9 ] = e[ 7 ];
			b[ offset + 10 ] = a[ offset + 10 ] = e[ 8 ];

			updated = true;

		}

		return updated;

	}

	/**
	 * Updates a given Matrix4 uniform.
	 *
	 * @param {Matrix4Uniform} uniform - The Matrix4 uniform.
	 * @return {Boolean} Whether the uniform has been updated or not.
	 */
	updateMatrix4( uniform ) {

		let updated = false;

		const a = this.values;
		const e = uniform.getValue().elements;
		const offset = uniform.offset;

		if ( arraysEqual( a, e, offset ) === false ) {

			const b = this.buffer;

			b.set( e, offset );
			setArray( a, e, offset );

			updated = true;

		}

		return updated;

	}

	/**
	 * Returns a typed array view on the internal buffer that matches the
	 * given data type, so integer uniforms are written with integer layout.
	 *
	 * @param {String} type - The data type.
	 * @return {TypedArray} The typed array.
	 */
	_getBufferForType( type ) {

		if ( type === 'int' || type === 'ivec2' || type === 'ivec3' || type === 'ivec4' ) return new Int32Array( this.buffer.buffer );
		if ( type === 'uint' || type === 'uvec2' || type === 'uvec3' || type === 'uvec4' ) return new Uint32Array( this.buffer.buffer );

		return this.buffer;

	}

}
/**
* Copies all values of the second array into the first array,
* starting at the given offset.
*
* @private
* @param {TypedArray} a - The destination array.
* @param {TypedArray} b - The source array.
* @param {Number} offset - An index offset for the destination array.
*/
function setArray( a, b, offset ) {

	b.forEach( ( value, i ) => {

		a[ offset + i ] = value;

	} );

}
/**
* Returns `true` if every element of the second array equals the
* corresponding element of the first array starting at the given offset.
*
* @private
* @param {TypedArray} a - The first array.
* @param {TypedArray} b - The second array.
* @param {Number} offset - An index offset for the first array.
* @return {Boolean} Whether the given arrays are equal or not.
*/
function arraysEqual( a, b, offset ) {

	return b.every( ( value, i ) => a[ offset + i ] === value );

}
let _id$3 = 0;
/**
* A special form of uniforms group that represents
* the individual uniforms as node-based uniforms.
*
* @private
* @augments UniformsGroup
*/
class NodeUniformsGroup extends UniformsGroup {
/**
* Constructs a new node-based uniforms group.
*
* @param {String} name - The group's name.
* @param {UniformGroupNode} groupNode - The uniform group node.
*/
constructor( name, groupNode ) {
super( name );
// Unique identifier of this group instance.
this.id = _id$3 ++;
// The uniform group node backing this group.
this.groupNode = groupNode;
// Type flag for instanceof-free type testing.
this.isNodeUniformsGroup = true;
}
}
let _id$2 = 0;
/**
* Represents a sampled texture binding type.
*
* @private
* @augments Binding
*/
class SampledTexture extends Binding {
/**
* Constructs a new sampled texture.
*
* @param {String} name - The sampled texture's name.
* @param {Texture?} texture - The texture this binding is referring to.
*/
constructor( name, texture ) {
super( name );
// Unique identifier of this binding.
this.id = _id$2 ++;
// The texture this binding refers to (may be null).
this.texture = texture;
// Version counter mirroring texture.version; compared in update().
this.version = texture ? texture.version : 0;
// Whether the texture is used as a storage texture.
this.store = false;
// Additional version qualifier, compared in needsBindingsUpdate().
this.generation = null;
// Type flag for instanceof-free type testing.
this.isSampledTexture = true;
}
/**
* Returns `true` when this binding requires an update for the
* given generation.
*
* @param {Number} generation - The generation.
* @return {Boolean} Whether an update is required or not.
*/
needsBindingsUpdate( generation ) {
if ( generation !== this.generation ) {
this.generation = generation;
return true;
}
// Video textures change continuously, so they always report an update.
return this.texture.isVideoTexture;
}
/**
* Updates the binding.
*
* @return {Boolean} Whether the texture has been updated and must be
* uploaded to the GPU.
*/
update() {
const texture = this.texture;
if ( this.version === texture.version ) return false;
this.version = texture.version;
return true;
}
}
/**
* A special form of sampled texture binding type.
* Its texture value is managed by a node object.
*
* @private
* @augments SampledTexture
*/
class NodeSampledTexture extends SampledTexture {
/**
* Constructs a new node-based sampled texture.
*
* @param {String} name - The texture's name.
* @param {TextureNode} textureNode - The texture node.
* @param {UniformGroupNode} groupNode - The uniform group node.
* @param {String?} [access=null] - The access type.
*/
constructor( name, textureNode, groupNode, access = null ) {
super( name, textureNode ? textureNode.value : null );
// The texture node whose value backs this binding.
this.textureNode = textureNode;
// The uniform group node this binding belongs to.
this.groupNode = groupNode;
// The access type, or null.
this.access = access;
}
/**
* Overwrites the default to additionally check if the node value has changed.
*
* @param {Number} generation - The generation.
* @return {Boolean} Whether an update is required or not.
*/
needsBindingsUpdate( generation ) {
if ( this.textureNode.value !== this.texture ) return true;
return super.needsBindingsUpdate( generation );
}
/**
* Updates the binding.
*
* @return {Boolean} Whether the texture has been updated and must be
* uploaded to the GPU.
*/
update() {
const value = this.textureNode.value;
if ( this.texture !== value ) {
// The node was assigned a different texture — adopt it and request an upload.
this.texture = value;
return true;
}
return super.update();
}
}
/**
* A special form of sampled cube texture binding type.
* Its texture value is managed by a node object.
*
* @private
* @augments NodeSampledTexture
*/
class NodeSampledCubeTexture extends NodeSampledTexture {
/**
* Constructs a new node-based sampled cube texture.
*
* @param {String} name - The texture's name.
* @param {TextureNode} textureNode - The texture node.
* @param {UniformGroupNode} groupNode - The uniform group node.
* @param {String?} [access=null] - The access type.
*/
constructor( name, textureNode, groupNode, access = null ) {
super( name, textureNode, groupNode, access );
// Type flag for instanceof-free type testing.
this.isSampledCubeTexture = true;
}
}
/**
* A special form of sampled 3D texture binding type.
* Its texture value is managed by a node object.
*
* @private
* @augments NodeSampledTexture
*/
class NodeSampledTexture3D extends NodeSampledTexture {
/**
* Constructs a new node-based sampled 3D texture.
*
* @param {String} name - The texture's name.
* @param {TextureNode} textureNode - The texture node.
* @param {UniformGroupNode} groupNode - The uniform group node.
* @param {String?} [access=null] - The access type.
*/
constructor( name, textureNode, groupNode, access = null ) {
super( name, textureNode, groupNode, access );
// Type flag for instanceof-free type testing.
this.isSampledTexture3D = true;
}
}
// Maps generic node system method names to their GLSL built-in equivalents.
const glslMethods = {
textureDimensions: 'textureSize',
equals: 'equal'
};
// Maps abstract precision levels to GLSL precision qualifiers.
const precisionLib = {
low: 'lowp',
medium: 'mediump',
high: 'highp'
};
// Feature support table for the GLSL backend. Also acts as a mutable cache:
// GLSLNodeBuilder#isAvailable() stores extension-probe results in here.
const supports$1 = {
swizzleAssign: true,
storageBuffer: false
};
// Default precision declarations prepended to every generated GLSL ES 3.00 shader.
// NOTE(review): sampler2DShadow is lowp while everything else is highp —
// presumably intentional for shadow comparisons, confirm against upstream.
const defaultPrecisions = `
precision highp float;
precision highp int;
precision highp sampler2D;
precision highp sampler3D;
precision highp samplerCube;
precision highp sampler2DArray;
precision highp usampler2D;
precision highp usampler3D;
precision highp usamplerCube;
precision highp usampler2DArray;
precision highp isampler2D;
precision highp isampler3D;
precision highp isamplerCube;
precision highp isampler2DArray;
precision lowp sampler2DShadow;
`;
/**
* A node builder targeting GLSL.
*
* This module generates GLSL shader code from node materials and also
* generates the respective bindings and vertex buffer definitions. These
* data are later used by the renderer to create render and compute pipelines
* for render objects.
*
* @augments NodeBuilder
*/
class GLSLNodeBuilder extends NodeBuilder {
/**
* Constructs a new GLSL node builder renderer.
*
* @param {Object3D} object - The 3D object.
* @param {Renderer} renderer - The renderer.
*/
constructor( object, renderer ) {
super( object, renderer, new GLSLNodeParser() );
/**
* A dictionary holds for each shader stage ('vertex', 'fragment', 'compute')
* another dictionary which manages UBOs per group ('render','frame','object').
*
* @type {Object<String,Object<String,NodeUniformsGroup>>}
*/
this.uniformGroups = {};
/**
* An array that holds objects defining the varying and attribute data in
* context of Transform Feedback. Populated via registerTransform().
*
* @type {Array<Object>}
*/
this.transforms = [];
/**
* A dictionary that holds for each shader stage a Map of used extensions.
*
* @type {Object<String,Map<String,Object>>}
*/
this.extensions = {};
/**
* A dictionary that holds for each shader stage an Array of used builtins.
*
* @type {Object<String,Array<String>>}
*/
this.builtins = { vertex: [], fragment: [], compute: [] };
/**
* Whether comparison in shader code are generated with methods or not.
*
* @type {Boolean}
* @default true
*/
this.useComparisonMethod = true;
}
/**
* Checks if the given texture requires a manual conversion to the working color space.
*
* @param {Texture} texture - The texture to check.
* @return {Boolean} Whether the given texture requires a conversion to working color space or not.
*/
needsToWorkingColorSpace( texture ) {
return texture.isVideoTexture === true && texture.colorSpace !== NoColorSpace;
}
/**
* Returns the native shader method name for a given generic name.
*
* @param {String} method - The method name to resolve.
* @return {String} The resolved GLSL method name.
*/
getMethod( method ) {
return glslMethods[ method ] || method;
}
/**
* Returns the output struct name. Not relevant for GLSL, since fragment
* outputs are declared as individual `out` variables (see getStructs()).
*
* @return {String} Always an empty string.
*/
getOutputStructName() {
return '';
}
/**
* Builds the given shader node into a standalone GLSL function definition.
*
* @param {ShaderNodeInternal} shaderNode - The shader node.
* @return {String} The GLSL function code.
*/
buildFunctionCode( shaderNode ) {
const layout = shaderNode.layout;
const flowData = this.flowShaderNode( shaderNode );
// Build the GLSL parameter list from the node's declared layout inputs.
const parameters = [];
for ( const input of layout.inputs ) {
parameters.push( this.getType( input.type ) + ' ' + input.name );
}
//
const code = `${ this.getType( layout.type ) } ${ layout.name }( ${ parameters.join( ', ' ) } ) {

${ flowData.vars }

${ flowData.code }
	return ${ flowData.result };

}`;
//
return code;
}
/**
* Setups the Pixel Buffer Object (PBO) for the given storage
* buffer node. This backend has no storage buffers (see `supports$1`),
* so the attribute data is mirrored into a data texture that generated
* shaders can read via texelFetch() (see generatePBO()).
*
* @param {StorageBufferNode} storageBufferNode - The storage buffer node.
*/
setupPBO( storageBufferNode ) {
const attribute = storageBufferNode.value;
if ( attribute.pbo === undefined ) {
const originalArray = attribute.array;
const numElements = attribute.count * attribute.itemSize;
const { itemSize } = attribute;
// Typed array constructor names like 'Int32Array'/'Uint8Array' contain 'int'.
const isInteger = attribute.array.constructor.name.toLowerCase().includes( 'int' );
// Pick a texture format with as many channels as the attribute's item size.
let format = isInteger ? RedIntegerFormat : RedFormat;
if ( itemSize === 2 ) {
format = isInteger ? RGIntegerFormat : RGFormat;
} else if ( itemSize === 3 ) {
format = isInteger ? RGBIntegerFormat : RGBFormat;
} else if ( itemSize === 4 ) {
format = isInteger ? RGBAIntegerFormat : RGBAFormat;
}
// Maps typed array constructors to matching texel data types.
const typeMap = {
Float32Array: FloatType,
Uint8Array: UnsignedByteType,
Uint16Array: UnsignedShortType,
Uint32Array: UnsignedIntType,
Int8Array: ByteType,
Int16Array: ShortType,
Int32Array: IntType,
Uint8ClampedArray: UnsignedByteType,
};
// Choose a roughly square texture with a power-of-two width that can
// hold all items; one texel stores one item (itemSize channels).
const width = Math.pow( 2, Math.ceil( Math.log2( Math.sqrt( numElements / itemSize ) ) ) );
let height = Math.ceil( ( numElements / itemSize ) / width );
if ( width * height * itemSize < numElements ) height ++; // Ensure enough space
// Pad the attribute storage so it fills the texture completely.
const newSize = width * height * itemSize;
const newArray = new originalArray.constructor( newSize );
newArray.set( originalArray, 0 );
attribute.array = newArray;
const pboTexture = new DataTexture( attribute.array, width, height, format, typeMap[ attribute.array.constructor.name ] || FloatType );
pboTexture.needsUpdate = true;
pboTexture.isPBOTexture = true;
const pbo = new TextureNode( pboTexture, null, null );
pbo.setPrecision( 'high' );
attribute.pboNode = pbo;
attribute.pbo = pbo.value;
// Register the PBO texture as a uniform so generated shaders can sample it.
this.getUniformFromNode( attribute.pboNode, 'texture', this.shaderStage, this.context.label );
}
}
/**
* Returns a GLSL snippet that represents the property name of the given node.
*
* @param {Node} node - The node.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The property name.
*/
getPropertyName( node, shaderStage = this.shaderStage ) {
if ( node.isNodeUniform && node.node.isTextureNode !== true && node.node.isBufferNode !== true ) {
return shaderStage.charAt( 0 ) + '_' + node.name;
}
return super.getPropertyName( node, shaderStage );
}
/**
* Generates the flow code that reads the given storage array element from
* its backing Pixel Buffer Object texture (see setupPBO()) and returns the
* name of the variable holding the fetched value.
*
* @param {StorageArrayElementNode} storageArrayElementNode - The storage array element node.
* @return {String} The property name.
*/
generatePBO( storageArrayElementNode ) {
const { node, indexNode } = storageArrayElementNode;
const attribute = node.value;
// Mirror the PBO reference into the backend's data for this attribute, if present.
if ( this.renderer.backend.has( attribute ) ) {
const attributeData = this.renderer.backend.get( attribute );
attributeData.pbo = attribute.pbo;
}
const nodeUniform = this.getUniformFromNode( attribute.pboNode, 'texture', this.shaderStage, this.context.label );
const textureName = this.getPropertyName( nodeUniform );
this.increaseUsage( indexNode ); // force cache generate to be used as index in x,y
const indexSnippet = indexNode.build( this, 'uint' );
const elementNodeData = this.getDataFromNode( storageArrayElementNode );
// Generate the fetch code only once per element node; afterwards reuse the variable.
let propertyName = elementNodeData.propertyName;
if ( propertyName === undefined ) {
// property element
const nodeVar = this.getVarFromNode( storageArrayElementNode );
propertyName = this.getPropertyName( nodeVar );
// property size
const bufferNodeData = this.getDataFromNode( node );
let propertySizeName = bufferNodeData.propertySizeName;
if ( propertySizeName === undefined ) {
propertySizeName = propertyName + 'Size';
this.getVarFromNode( node, propertySizeName, 'uint' );
// Cache the texture width once; needed to map the linear index to 2D coords.
this.addLineFlowCode( `${ propertySizeName } = uint( textureSize( ${ textureName }, 0 ).x )`, storageArrayElementNode );
bufferNodeData.propertySizeName = propertySizeName;
}
//
const { itemSize } = attribute;
// Swizzle as many components as the attribute's item size (e.g. '.xyz' for 3).
const channel = '.' + vectorComponents.join( '' ).slice( 0, itemSize );
// Convert the linear element index into 2D texel coordinates.
const uvSnippet = `ivec2(${indexSnippet} % ${ propertySizeName }, ${indexSnippet} / ${ propertySizeName })`;
const snippet = this.generateTextureLoad( null, textureName, uvSnippet, null, '0' );
//
// Match the fetched vector type to the PBO texture's texel type.
let prefix = 'vec4';
if ( attribute.pbo.type === UnsignedIntType ) {
prefix = 'uvec4';
} else if ( attribute.pbo.type === IntType ) {
prefix = 'ivec4';
}
this.addLineFlowCode( `${ propertyName } = ${prefix}(${ snippet })${channel}`, storageArrayElementNode );
elementNodeData.propertyName = propertyName;
}
return propertyName;
}
/**
* Generates the GLSL snippet that reads a single texel from a texture without sampling or filtering.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvIndexSnippet - A GLSL snippet that represents texture coordinates used for sampling.
* @param {String?} depthSnippet - A GLSL snippet that represents the 0-based texture array index to sample.
* @param {String} [levelSnippet='0u'] - A GLSL snippet that represents the mip level, with level 0 containing a full size version of the texture.
* @return {String} The GLSL snippet.
*/
generateTextureLoad( texture, textureProperty, uvIndexSnippet, depthSnippet, levelSnippet = '0' ) {
if ( depthSnippet ) {
return `texelFetch( ${ textureProperty }, ivec3( ${ uvIndexSnippet }, ${ depthSnippet } ), ${ levelSnippet } )`;
} else {
return `texelFetch( ${ textureProperty }, ${ uvIndexSnippet }, ${ levelSnippet } )`;
}
}
/**
* Generates the GLSL snippet for sampling/loading the given texture.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A GLSL snippet that represents texture coordinates used for sampling.
* @param {String?} depthSnippet - A GLSL snippet that represents the 0-based texture array index to sample.
* @return {String} The GLSL snippet.
*/
generateTexture( texture, textureProperty, uvSnippet, depthSnippet ) {
if ( texture.isDepthTexture ) {
return `texture( ${ textureProperty }, ${ uvSnippet } ).x`;
} else {
if ( depthSnippet ) uvSnippet = `vec3( ${ uvSnippet }, ${ depthSnippet } )`;
return `texture( ${ textureProperty }, ${ uvSnippet } )`;
}
}
/**
* Generates the GLSL snippet when sampling textures with explicit mip level.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A GLSL snippet that represents texture coordinates used for sampling.
* @param {String} levelSnippet - A GLSL snippet that represents the mip level, with level 0 containing a full size version of the texture.
* @return {String} The GLSL snippet.
*/
generateTextureLevel( texture, textureProperty, uvSnippet, levelSnippet ) {
return `textureLod( ${ textureProperty }, ${ uvSnippet }, ${ levelSnippet } )`;
}
/**
* Generates the GLSL snippet when sampling textures with a bias to the mip level.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A GLSL snippet that represents texture coordinates used for sampling.
* @param {String} biasSnippet - A GLSL snippet that represents the bias to apply to the mip level before sampling.
* @return {String} The GLSL snippet.
*/
generateTextureBias( texture, textureProperty, uvSnippet, biasSnippet ) {
return `texture( ${ textureProperty }, ${ uvSnippet }, ${ biasSnippet } )`;
}
/**
* Generates the GLSL snippet for sampling/loading the given texture using explicit gradients.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A GLSL snippet that represents texture coordinates used for sampling.
* @param {Array<String>} gradSnippet - An array holding both gradient GLSL snippets.
* @return {String} The GLSL snippet.
*/
generateTextureGrad( texture, textureProperty, uvSnippet, gradSnippet ) {
return `textureGrad( ${ textureProperty }, ${ uvSnippet }, ${ gradSnippet[ 0 ] }, ${ gradSnippet[ 1 ] } )`;
}
/**
* Generates the GLSL snippet for sampling a depth texture and comparing the sampled depth values
* against a reference value.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A GLSL snippet that represents texture coordinates used for sampling.
* @param {String} compareSnippet - A GLSL snippet that represents the reference value.
* @param {String?} depthSnippet - A GLSL snippet that represents 0-based texture array index to sample.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The GLSL snippet.
*/
generateTextureCompare( texture, textureProperty, uvSnippet, compareSnippet, depthSnippet, shaderStage = this.shaderStage ) {
if ( shaderStage === 'fragment' ) {
return `texture( ${ textureProperty }, vec3( ${ uvSnippet }, ${ compareSnippet } ) )`;
} else {
console.error( `WebGPURenderer: THREE.DepthTexture.compareFunction() does not support ${ shaderStage } shader.` );
}
}
/**
* Returns the variables of the given shader stage as a GLSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The GLSL snippet that defines the variables.
*/
getVars( shaderStage ) {
const snippets = [];
const vars = this.vars[ shaderStage ];
if ( vars !== undefined ) {
for ( const variable of vars ) {
snippets.push( `${ this.getVar( variable.type, variable.name, variable.count ) };` );
}
}
return snippets.join( '\n\t' );
}
/**
* Returns the uniforms of the given shader stage as a GLSL string.
* Samplers and buffers become individual `uniform` declarations; all other
* uniforms are grouped into std140 uniform blocks, one per group node.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The GLSL snippet that defines the uniforms.
*/
getUniforms( shaderStage ) {
const uniforms = this.uniforms[ shaderStage ];
const bindingSnippets = [];
const uniformGroups = {};
for ( const uniform of uniforms ) {
let snippet = null;
// 'group' marks uniforms that go into a uniform block instead of a binding.
let group = false;
if ( uniform.type === 'texture' ) {
const texture = uniform.node.value;
// Integer-typed data textures need u/i sampler prefixes.
let typePrefix = '';
if ( texture.isDataTexture === true ) {
if ( texture.type === UnsignedIntType ) {
typePrefix = 'u';
} else if ( texture.type === IntType ) {
typePrefix = 'i';
}
}
if ( texture.compareFunction ) {
// Depth comparison requires a shadow sampler.
snippet = `sampler2DShadow ${ uniform.name };`;
} else if ( texture.isDataArrayTexture === true || texture.isCompressedArrayTexture === true ) {
snippet = `${typePrefix}sampler2DArray ${ uniform.name };`;
} else {
snippet = `${typePrefix}sampler2D ${ uniform.name };`;
}
} else if ( uniform.type === 'cubeTexture' ) {
snippet = `samplerCube ${ uniform.name };`;
} else if ( uniform.type === 'texture3D' ) {
snippet = `sampler3D ${ uniform.name };`;
} else if ( uniform.type === 'buffer' ) {
// Buffers are emitted as a named uniform block wrapping one array member.
const bufferNode = uniform.node;
const bufferType = this.getType( bufferNode.bufferType );
const bufferCount = bufferNode.bufferCount;
const bufferCountSnippet = bufferCount > 0 ? bufferCount : '';
snippet = `${bufferNode.name} {\n\t${ bufferType } ${ uniform.name }[${ bufferCountSnippet }];\n};\n`;
} else {
// Plain value uniform — declared inside a per-group uniform block.
const vectorType = this.getVectorType( uniform.type );
snippet = `${ vectorType } ${ this.getPropertyName( uniform, shaderStage ) };`;
group = true;
}
// Optional explicit precision qualifier (lowp/mediump/highp).
const precision = uniform.node.precision;
if ( precision !== null ) {
snippet = precisionLib[ precision ] + ' ' + snippet;
}
if ( group ) {
snippet = '\t' + snippet;
const groupName = uniform.groupNode.name;
const groupSnippets = uniformGroups[ groupName ] || ( uniformGroups[ groupName ] = [] );
groupSnippets.push( snippet );
} else {
snippet = 'uniform ' + snippet;
bindingSnippets.push( snippet );
}
}
// Emit one std140 uniform block per group, then the standalone uniforms.
let output = '';
for ( const name in uniformGroups ) {
const groupSnippets = uniformGroups[ name ];
output += this._getGLSLUniformStruct( shaderStage + '_' + name, groupSnippets.join( '\n' ) ) + '\n';
}
output += bindingSnippets.join( '\n' );
return output;
}
/**
* Returns the type for a given buffer attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
* @return {String} The type.
*/
getTypeFromAttribute( attribute ) {
let nodeType = super.getTypeFromAttribute( attribute );
if ( /^[iu]/.test( nodeType ) && attribute.gpuType !== IntType ) {
let dataAttribute = attribute;
if ( attribute.isInterleavedBufferAttribute ) dataAttribute = attribute.data;
const array = dataAttribute.array;
if ( ( array instanceof Uint32Array || array instanceof Int32Array ) === false ) {
nodeType = nodeType.slice( 1 );
}
}
return nodeType;
}
/**
* Returns the shader attributes of the given shader stage as a GLSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The GLSL snippet that defines the shader attributes.
*/
getAttributes( shaderStage ) {
let snippet = '';
if ( shaderStage === 'vertex' || shaderStage === 'compute' ) {
const attributes = this.getAttributesArray();
let location = 0;
for ( const attribute of attributes ) {
snippet += `layout( location = ${ location ++ } ) in ${ attribute.type } ${ attribute.name };\n`;
}
}
return snippet;
}
/**
* Returns the members of the given struct type node as a GLSL string.
*
* @param {StructTypeNode} struct - The struct type node.
* @return {String} The GLSL snippet that defines the struct members.
*/
getStructMembers( struct ) {
const snippets = [];
for ( const member of struct.members ) {
snippets.push( `\t${ member.type } ${ member.name };` );
}
return snippets.join( '\n' );
}
/**
* Returns the structs of the given shader stage as a GLSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The GLSL snippet that defines the structs.
*/
getStructs( shaderStage ) {
const snippets = [];
const structs = this.structs[ shaderStage ];
const outputSnippet = [];
for ( const struct of structs ) {
if ( struct.output ) {
for ( const member of struct.members ) {
outputSnippet.push( `layout( location = ${ member.index } ) out ${ member.type } ${ member.name };` );
}
} else {
let snippet = 'struct ' + struct.name + ' {\n';
snippet += this.getStructMembers( struct );
snippet += '\n};\n';
snippets.push( snippet );
}
}
if ( outputSnippet.length === 0 ) {
outputSnippet.push( 'layout( location = 0 ) out vec4 fragColor;' );
}
return '\n' + outputSnippet.join( '\n' ) + '\n\n' + snippets.join( '\n' );
}
/**
* Returns the varyings of the given shader stage as a GLSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The GLSL snippet that defines the varyings.
*/
getVaryings( shaderStage ) {
let snippet = '';
const varyings = this.varyings;
if ( shaderStage === 'vertex' || shaderStage === 'compute' ) {
for ( const varying of varyings ) {
// Compute runs through the vertex stage here (see registerTransform()/
// getTransforms()), so every varying must actually be written out.
if ( shaderStage === 'compute' ) varying.needsInterpolation = true;
const type = this.getType( varying.type );
if ( varying.needsInterpolation ) {
// Integer-based types (int/ivecN/uvecN) cannot be interpolated → 'flat'.
const flat = type.includes( 'int' ) || type.includes( 'uv' ) || type.includes( 'iv' ) ? 'flat ' : '';
snippet += `${flat} out ${type} ${varying.name};\n`;
} else {
snippet += `${type} ${varying.name};\n`; // generate variable (no varying required)
}
}
} else if ( shaderStage === 'fragment' ) {
for ( const varying of varyings ) {
if ( varying.needsInterpolation ) {
const type = this.getType( varying.type );
// Same flat rule as the vertex side; qualifiers must match across stages.
const flat = type.includes( 'int' ) || type.includes( 'uv' ) || type.includes( 'iv' ) ? 'flat ' : '';
snippet += `${flat}in ${type} ${varying.name};\n`;
}
}
}
// Append stage builtins collected e.g. by enableHardwareClipping().
for ( const builtin of this.builtins[ shaderStage ] ) {
snippet += `${builtin};\n`;
}
return snippet;
}
/**
* Returns the vertex index builtin.
*
* @return {String} The vertex index.
*/
getVertexIndex() {
// gl_VertexID is a signed int in GLSL; cast so node code can rely on uint.
return 'uint( gl_VertexID )';
}
/**
* Returns the instance index builtin.
*
* @return {String} The instance index.
*/
getInstanceIndex() {
// gl_InstanceID is a signed int in GLSL; cast so node code can rely on uint.
return 'uint( gl_InstanceID )';
}
/**
* Returns the invocation local index builtin.
*
* @return {String} The invocation local index.
*/
getInvocationLocalIndex() {
const workgroupSize = this.object.workgroupSize;
const size = workgroupSize.reduce( ( acc, curr ) => acc * curr, 1 );
return `uint( gl_InstanceID ) % ${size}u`;
}
/**
* Returns the draw index builtin.
*
* @return {String?} The drawIndex shader string. Returns `null` if `WEBGL_multi_draw` isn't supported by the device.
*/
getDrawIndex() {
const extensions = this.renderer.backend.extensions;
if ( extensions.has( 'WEBGL_multi_draw' ) ) {
return 'uint( gl_DrawID )';
}
return null;
}
/**
* Returns the front facing builtin.
*
* @return {String} The front facing builtin (a bool in GLSL).
*/
getFrontFacing() {
return 'gl_FrontFacing';
}
/**
* Returns the frag coord builtin.
*
* @return {String} The frag coord builtin. Only the window-space x/y
* components are exposed; depth is available via getFragDepth().
*/
getFragCoord() {
return 'gl_FragCoord.xy';
}
/**
* Returns the frag depth builtin, which allows overriding the
* fragment's depth value from the fragment shader.
*
* @return {String} The frag depth builtin.
*/
getFragDepth() {
return 'gl_FragDepth';
}
/**
* Enables the given extension.
*
* @param {String} name - The extension name.
* @param {String} behavior - The extension behavior.
* @param {String} [shaderStage=this.shaderStage] - The shader stage.
*/
enableExtension( name, behavior, shaderStage = this.shaderStage ) {
const map = this.extensions[ shaderStage ] || ( this.extensions[ shaderStage ] = new Map() );
if ( map.has( name ) === false ) {
map.set( name, {
name,
behavior
} );
}
}
/**
* Returns the enabled extensions of the given shader stage as a GLSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The GLSL snippet that defines the enabled extensions.
*/
getExtensions( shaderStage ) {
const snippets = [];
if ( shaderStage === 'vertex' ) {
const ext = this.renderer.backend.extensions;
const isBatchedMesh = this.object.isBatchedMesh;
if ( isBatchedMesh && ext.has( 'WEBGL_multi_draw' ) ) {
this.enableExtension( 'GL_ANGLE_multi_draw', 'require', shaderStage );
}
}
const extensions = this.extensions[ shaderStage ];
if ( extensions !== undefined ) {
for ( const { name, behavior } of extensions.values() ) {
snippets.push( `#extension ${name} : ${behavior}` );
}
}
return snippets.join( '\n' );
}
/**
* Returns the clip distances builtin. Matches the gl_ClipDistance array
* declared by enableHardwareClipping().
*
* @return {String} The clip distances builtin.
*/
getClipDistance() {
return 'gl_ClipDistance';
}
/**
* Whether the requested feature is available or not.
*
* @param {String} name - The requested feature.
* @return {Boolean} Whether the requested feature is supported or not.
*/
isAvailable( name ) {
let result = supports$1[ name ];
if ( result === undefined ) {
let extensionName;
result = false;
switch ( name ) {
case 'float32Filterable':
extensionName = 'OES_texture_float_linear';
break;
case 'clipDistance':
extensionName = 'WEBGL_clip_cull_distance';
break;
}
if ( extensionName !== undefined ) {
const extensions = this.renderer.backend.extensions;
if ( extensions.has( extensionName ) ) {
extensions.get( extensionName );
result = true;
}
}
supports$1[ name ] = result;
}
return result;
}
/**
* Whether to flip texture data along its vertical axis or not.
*
* @return {Boolean} Returns always `true` in context of GLSL — presumably
* because of WebGL's bottom-left UV origin; confirm against the backend's
* texture upload path.
*/
isFlipY() {
return true;
}
/**
* Enables hardware clipping.
*
* @param {String} planeCount - The clipping plane count.
*/
enableHardwareClipping( planeCount ) {
this.enableExtension( 'GL_ANGLE_clip_cull_distance', 'require' );
this.builtins[ 'vertex' ].push( `out float gl_ClipDistance[ ${ planeCount } ]` );
}
/**
* Registers a transform in context of Transform Feedback.
*
* @param {String} varyingName - The varying name.
* @param {AttributeNode} attributeNode - The attribute node.
*/
registerTransform( varyingName, attributeNode ) {
this.transforms.push( { varyingName, attributeNode } );
}
/**
* Returns the transforms of the given shader stage as a GLSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The GLSL snippet that defines the transforms.
*/
getTransforms( /* shaderStage */ ) {
const transforms = this.transforms;
let snippet = '';
for ( let i = 0; i < transforms.length; i ++ ) {
const transform = transforms[ i ];
const attributeName = this.getPropertyName( transform.attributeNode );
snippet += `${ transform.varyingName } = ${ attributeName };\n\t`;
}
return snippet;
}
/**
* Returns a GLSL uniform block based on the given name and variables.
* std140 layout is used so the CPU-side buffer layout is predictable.
*
* @private
* @param {String} name - The struct name.
* @param {String} vars - The struct variables.
* @return {String} The GLSL snippet representing a struct.
*/
_getGLSLUniformStruct( name, vars ) {
return `
layout( std140 ) uniform ${name} {
${vars}
};`;
}
/**
* Returns a GLSL vertex shader based on the given shader data.
* Also used for compute shaders, which this backend emulates via the
* vertex stage (see buildCode()).
*
* @private
* @param {Object} shaderData - The shader data.
* @return {String} The vertex shader.
*/
_getGLSLVertexCode( shaderData ) {
return `#version 300 es

${ this.getSignature() }

// extensions
${shaderData.extensions}

// precision
${ defaultPrecisions }

// uniforms
${shaderData.uniforms}

// varyings
${shaderData.varyings}

// attributes
${shaderData.attributes}

// codes
${shaderData.codes}

void main() {

	// vars
	${shaderData.vars}

	// transforms
	${shaderData.transforms}

	// flow
	${shaderData.flow}

	gl_PointSize = 1.0;

}
`;
}
/**
* Returns a GLSL fragment shader based on the given shader data.
*
* @private
* @param {Object} shaderData - The shader data.
* @return {String} The fragment shader.
*/
_getGLSLFragmentCode( shaderData ) {
return `#version 300 es

${ this.getSignature() }

// precision
${ defaultPrecisions }

// uniforms
${shaderData.uniforms}

// varyings
${shaderData.varyings}

// codes
${shaderData.codes}

// structs
${shaderData.structs}

void main() {

	// vars
	${shaderData.vars}

	// flow
	${shaderData.flow}

}
`;
}
/**
* Controls the code build of the shader stages. Assembles the node flow
* into GLSL source for vertex/fragment (render) or compute (emulated via
* the vertex stage) and stores the result on this builder.
*/
buildCode() {
// Render objects have a material and use vertex+fragment; otherwise compute.
const shadersData = this.material !== null ? { fragment: {}, vertex: {} } : { compute: {} };
this.sortBindingGroups();
for ( const shaderStage in shadersData ) {
let flow = '// code\n\n';
flow += this.flowCode[ shaderStage ];
const flowNodes = this.flowNodes[ shaderStage ];
// The last flow node produces the stage's final result.
const mainNode = flowNodes[ flowNodes.length - 1 ];
for ( const node of flowNodes ) {
const flowSlotData = this.getFlowData( node/*, shaderStage*/ );
const slotName = node.name;
if ( slotName ) {
if ( flow.length > 0 ) flow += '\n';
flow += `\t// flow -> ${ slotName }\n\t`;
}
flow += `${ flowSlotData.code }\n\t`;
if ( node === mainNode && shaderStage !== 'compute' ) {
flow += '// result\n\t';
if ( shaderStage === 'vertex' ) {
// Vertex stage result drives the clip-space position.
flow += 'gl_Position = ';
flow += `${ flowSlotData.result };`;
} else if ( shaderStage === 'fragment' ) {
// Output struct nodes declare their own outputs (see getStructs()).
if ( ! node.outputNode.isOutputStructNode ) {
flow += 'fragColor = ';
flow += `${ flowSlotData.result };`;
}
}
}
}
// Collect all per-stage snippets needed by the shader templates.
const stageData = shadersData[ shaderStage ];
stageData.extensions = this.getExtensions( shaderStage );
stageData.uniforms = this.getUniforms( shaderStage );
stageData.attributes = this.getAttributes( shaderStage );
stageData.varyings = this.getVaryings( shaderStage );
stageData.vars = this.getVars( shaderStage );
stageData.structs = this.getStructs( shaderStage );
stageData.codes = this.getCodes( shaderStage );
stageData.transforms = this.getTransforms( shaderStage );
stageData.flow = flow;
}
if ( this.material !== null ) {
this.vertexShader = this._getGLSLVertexCode( shadersData.vertex );
this.fragmentShader = this._getGLSLFragmentCode( shadersData.fragment );
} else {
// Compute reuses the vertex template (transform feedback based execution).
this.computeShader = this._getGLSLVertexCode( shadersData.compute );
}
}
/**
* This method is one of the more important ones since it's responsible
* for generating a matching binding instance for the given uniform node.
*
* These bindings are later used in the renderer to create bind groups
* and layouts.
*
* @param {UniformNode} node - The uniform node.
* @param {String} type - The node data type.
* @param {String} shaderStage - The shader stage.
* @param {String?} [name=null] - An optional uniform name.
* @return {NodeUniform} The node uniform object.
*/
getUniformFromNode( node, type, shaderStage, name = null ) {
const uniformNode = super.getUniformFromNode( node, type, shaderStage, name );
const nodeData = this.getDataFromNode( node, shaderStage, this.globalCache );
// Reuse the GPU binding if this node has already been processed.
let uniformGPU = nodeData.uniformGPU;
if ( uniformGPU === undefined ) {
const group = node.groupNode;
const groupName = group.name;
const bindings = this.getBindGroupArray( groupName, shaderStage );
// Textures and buffers become individual bindings ...
if ( type === 'texture' ) {
uniformGPU = new NodeSampledTexture( uniformNode.name, uniformNode.node, group );
bindings.push( uniformGPU );
} else if ( type === 'cubeTexture' ) {
uniformGPU = new NodeSampledCubeTexture( uniformNode.name, uniformNode.node, group );
bindings.push( uniformGPU );
} else if ( type === 'texture3D' ) {
uniformGPU = new NodeSampledTexture3D( uniformNode.name, uniformNode.node, group );
bindings.push( uniformGPU );
} else if ( type === 'buffer' ) {
node.name = `NodeBuffer_${ node.id }`;
uniformNode.name = `buffer${ node.id }`;
const buffer = new NodeUniformBuffer( node, group );
buffer.name = node.name;
bindings.push( buffer );
uniformGPU = buffer;
} else {
// ... while plain uniforms are packed into a per-stage, per-group
// uniforms group that is created on first use.
const uniformsStage = this.uniformGroups[ shaderStage ] || ( this.uniformGroups[ shaderStage ] = {} );
let uniformsGroup = uniformsStage[ groupName ];
if ( uniformsGroup === undefined ) {
uniformsGroup = new NodeUniformsGroup( shaderStage + '_' + groupName, group );
//uniformsGroup.setVisibility( gpuShaderStageLib[ shaderStage ] );
uniformsStage[ groupName ] = uniformsGroup;
bindings.push( uniformsGroup );
}
uniformGPU = this.getNodeUniform( uniformNode, type );
uniformsGroup.addUniform( uniformGPU );
}
// Cache so subsequent lookups for this node return the same binding.
nodeData.uniformGPU = uniformGPU;
}
return uniformNode;
}
}
// Module-level scratch objects, lazily created by Backend.getDrawingBufferSize()
// and Backend.getClearColor() to avoid per-call allocations.
let _vector2 = null;
let _color4 = null;
/**
* Most of the rendering related logic is implemented in the
* {@link module:Renderer} module and related management components.
* Sometimes it is required though to execute commands which are
* specific to the current 3D backend (which is WebGPU or WebGL 2).
* This abstract base class defines an interface that encapsulates
* all backend-related logic. Derived classes for each backend must
* implement the interface.
*
* @abstract
* @private
*/
class Backend {
/**
* Constructs a new backend.
*
* @param {Object} parameters - An object holding parameters for the backend.
*/
constructor( parameters = {} ) {
/**
* The parameters of the backend.
*
* @type {Object}
*/
this.parameters = Object.assign( {}, parameters );
/**
* This weak map holds backend-specific data of objects
* like textures, attributes or render targets.
*
* @type {WeakMap}
*/
this.data = new WeakMap();
/**
* A reference to the renderer.
*
* @type {Renderer?}
* @default null
*/
this.renderer = null;
/**
* A reference to the canvas element the renderer is drawing to.
*
* @type {(HTMLCanvasElement|OffscreenCanvas)?}
* @default null
*/
this.domElement = null;
/**
* A reference to the timestamp query pool.
*
* @type {{render: TimestampQueryPool?, compute: TimestampQueryPool?}}
*/
this.timestampQueryPool = {
'render': null,
'compute': null
};
}
/**
* Initializes the backend so it is ready for usage. Concrete backends
* are supposed to implement their rendering context creation and related
* operations in this method.
*
* @async
* @param {Renderer} renderer - The renderer.
* @return {Promise} A Promise that resolves when the backend has been initialized.
*/
async init( renderer ) {
this.renderer = renderer;
}
/**
* The coordinate system of the backend.
*
* @abstract
* @type {Number}
* @readonly
*/
get coordinateSystem() {}
// render context
/**
* This method is executed at the beginning of a render call and
* can be used by the backend to prepare the state for upcoming
* draw calls.
*
* @abstract
* @param {RenderContext} renderContext - The render context.
*/
beginRender( /*renderContext*/ ) {}
/**
* This method is executed at the end of a render call and
* can be used by the backend to finalize work after draw
* calls.
*
* @abstract
* @param {RenderContext} renderContext - The render context.
*/
finishRender( /*renderContext*/ ) {}
/**
* This method is executed at the beginning of a compute call and
* can be used by the backend to prepare the state for upcoming
* compute tasks.
*
* @abstract
* @param {Node|Array<Node>} computeGroup - The compute node(s).
*/
beginCompute( /*computeGroup*/ ) {}
/**
* This method is executed at the end of a compute call and
* can be used by the backend to finalize work after compute
* tasks.
*
* @abstract
* @param {Node|Array<Node>} computeGroup - The compute node(s).
*/
finishCompute( /*computeGroup*/ ) {}
// render object
/**
* Executes a draw command for the given render object.
*
* @abstract
* @param {RenderObject} renderObject - The render object to draw.
* @param {Info} info - Holds a series of statistical information about the GPU memory and the rendering process.
*/
draw( /*renderObject, info*/ ) { }
// compute node
/**
* Executes a compute command for the given compute node.
*
* @abstract
* @param {Node|Array<Node>} computeGroup - The group of compute nodes of a compute call. Can be a single compute node.
* @param {Node} computeNode - The compute node.
* @param {Array<BindGroup>} bindings - The bindings.
* @param {ComputePipeline} computePipeline - The compute pipeline.
*/
compute( /*computeGroup, computeNode, computeBindings, computePipeline*/ ) { }
// program
/**
* Creates a shader program from the given programmable stage.
*
* @abstract
* @param {ProgrammableStage} program - The programmable stage.
*/
createProgram( /*program*/ ) { }
/**
* Destroys the shader program of the given programmable stage.
*
* @abstract
* @param {ProgrammableStage} program - The programmable stage.
*/
destroyProgram( /*program*/ ) { }
// bindings
/**
* Creates bindings from the given bind group definition.
*
* @abstract
* @param {BindGroup} bindGroup - The bind group.
* @param {Array<BindGroup>} bindings - Array of bind groups.
* @param {Number} cacheIndex - The cache index.
* @param {Number} version - The version.
*/
createBindings( /*bindGroup, bindings, cacheIndex, version*/ ) { }
/**
* Updates the given bind group definition.
*
* @abstract
* @param {BindGroup} bindGroup - The bind group.
* @param {Array<BindGroup>} bindings - Array of bind groups.
* @param {Number} cacheIndex - The cache index.
* @param {Number} version - The version.
*/
updateBindings( /*bindGroup, bindings, cacheIndex, version*/ ) { }
/**
* Updates a buffer binding.
*
* @abstract
* @param {Buffer} binding - The buffer binding to update.
*/
updateBinding( /*binding*/ ) { }
// pipeline
/**
* Creates a render pipeline for the given render object.
*
* @abstract
* @param {RenderObject} renderObject - The render object.
* @param {Array<Promise>} promises - An array of compilation promises which are used in `compileAsync()`.
*/
createRenderPipeline( /*renderObject, promises*/ ) { }
/**
* Creates a compute pipeline for the given compute node.
*
* @abstract
* @param {ComputePipeline} computePipeline - The compute pipeline.
* @param {Array<BindGroup>} bindings - The bindings.
*/
createComputePipeline( /*computePipeline, bindings*/ ) { }
// cache key
/**
* Returns `true` if the render pipeline requires an update.
*
* @abstract
* @param {RenderObject} renderObject - The render object.
* @return {Boolean} Whether the render pipeline requires an update or not.
*/
needsRenderUpdate( /*renderObject*/ ) { }
/**
* Returns a cache key that is used to identify render pipelines.
*
* @abstract
* @param {RenderObject} renderObject - The render object.
* @return {String} The cache key.
*/
getRenderCacheKey( /*renderObject*/ ) { }
// node builder
/**
* Returns a node builder for the given render object.
*
* @abstract
* @param {RenderObject} renderObject - The render object.
* @param {Renderer} renderer - The renderer.
* @return {NodeBuilder} The node builder.
*/
createNodeBuilder( /*renderObject, renderer*/ ) { }
// textures
/**
* Creates a GPU sampler for the given texture.
*
* @abstract
* @param {Texture} texture - The texture to create the sampler for.
*/
createSampler( /*texture*/ ) { }
/**
* Destroys the GPU sampler for the given texture.
*
* @abstract
* @param {Texture} texture - The texture to destroy the sampler for.
*/
destroySampler( /*texture*/ ) {}
/**
* Creates a default texture for the given texture that can be used
* as a placeholder until the actual texture is ready for usage.
*
* @abstract
* @param {Texture} texture - The texture to create a default texture for.
*/
createDefaultTexture( /*texture*/ ) { }
/**
* Defines a texture on the GPU for the given texture object.
*
* @abstract
* @param {Texture} texture - The texture.
* @param {Object} [options={}] - Optional configuration parameter.
*/
createTexture( /*texture, options={}*/ ) { }
/**
* Uploads the updated texture data to the GPU.
*
* @abstract
* @param {Texture} texture - The texture.
* @param {Object} [options={}] - Optional configuration parameter.
*/
updateTexture( /*texture, options = {}*/ ) { }
/**
* Generates mipmaps for the given texture.
*
* @abstract
* @param {Texture} texture - The texture.
*/
generateMipmaps( /*texture*/ ) { }
/**
* Destroys the GPU data for the given texture object.
*
* @abstract
* @param {Texture} texture - The texture.
*/
destroyTexture( /*texture*/ ) { }
/**
* Returns texture data as a typed array.
*
* @abstract
* @async
* @param {Texture} texture - The texture to copy.
* @param {Number} x - The x coordinate of the copy origin.
* @param {Number} y - The y coordinate of the copy origin.
* @param {Number} width - The width of the copy.
* @param {Number} height - The height of the copy.
* @param {Number} faceIndex - The face index.
* @return {Promise<TypedArray>} A Promise that resolves with a typed array when the copy operation has finished.
*/
async copyTextureToBuffer( /*texture, x, y, width, height, faceIndex*/ ) {}
/**
* Copies data of the given source texture to the given destination texture.
*
* @abstract
* @param {Texture} srcTexture - The source texture.
* @param {Texture} dstTexture - The destination texture.
* @param {Vector4?} [srcRegion=null] - The region of the source texture to copy.
* @param {(Vector2|Vector3)?} [dstPosition=null] - The destination position of the copy.
* @param {Number} [level=0] - The mip level to copy.
*/
copyTextureToTexture( /*srcTexture, dstTexture, srcRegion = null, dstPosition = null, level = 0*/ ) {}
/**
* Copies the current bound framebuffer to the given texture.
*
* @abstract
* @param {Texture} texture - The destination texture.
* @param {RenderContext} renderContext - The render context.
* @param {Vector4} rectangle - A four dimensional vector defining the origin and dimension of the copy.
*/
copyFramebufferToTexture( /*texture, renderContext, rectangle*/ ) {}
// attributes
/**
* Creates the GPU buffer of a shader attribute.
*
* @abstract
* @param {BufferAttribute} attribute - The buffer attribute.
*/
createAttribute( /*attribute*/ ) { }
/**
* Creates the GPU buffer of an indexed shader attribute.
*
* @abstract
* @param {BufferAttribute} attribute - The indexed buffer attribute.
*/
createIndexAttribute( /*attribute*/ ) { }
/**
* Creates the GPU buffer of a storage attribute.
*
* @abstract
* @param {BufferAttribute} attribute - The buffer attribute.
*/
createStorageAttribute( /*attribute*/ ) { }
/**
* Updates the GPU buffer of a shader attribute.
*
* @abstract
* @param {BufferAttribute} attribute - The buffer attribute to update.
*/
updateAttribute( /*attribute*/ ) { }
/**
* Destroys the GPU buffer of a shader attribute.
*
* @abstract
* @param {BufferAttribute} attribute - The buffer attribute to destroy.
*/
destroyAttribute( /*attribute*/ ) { }
// canvas
/**
* Returns the backend's rendering context.
*
* @abstract
* @return {Object} The rendering context.
*/
getContext() { }
/**
* Backends can use this method if they have to run
* logic when the renderer gets resized.
*
* @abstract
*/
updateSize() { }
/**
* Updates the viewport with the values from the given render context.
*
* @abstract
* @param {RenderContext} renderContext - The render context.
*/
updateViewport( /*renderContext*/ ) {}
// utils
/**
* Returns `true` if the given 3D object is fully occluded by other
* 3D objects in the scene. Backends must implement this method by using
* an Occlusion Query API.
*
* @abstract
* @param {RenderContext} renderContext - The render context.
* @param {Object3D} object - The 3D object to test.
* @return {Boolean} Whether the 3D object is fully occluded or not.
*/
isOccluded( /*renderContext, object*/ ) {}
/**
* Resolves the time stamp for the given render context and type.
*
* @async
* @abstract
* @param {String} [type='render'] - The type of the time stamp.
* @return {Promise<Number>} A Promise that resolves with the time stamp.
*/
async resolveTimestampsAsync( type = 'render' ) {
// NOTE(review): 'trackTimestamp' is not defined in this base class; it is
// presumably provided by the concrete backend/renderer setup — confirm.
if ( ! this.trackTimestamp ) {
warnOnce( 'WebGPURenderer: Timestamp tracking is disabled.' );
return;
}
const queryPool = this.timestampQueryPool[ type ];
if ( ! queryPool ) {
warnOnce( `WebGPURenderer: No timestamp query pool for type '${type}' found.` );
return;
}
const duration = await queryPool.resolveQueriesAsync();
// Mirror the resolved duration into the renderer's info for inspection.
this.renderer.info[ type ].timestamp = duration;
return duration;
}
/**
* Can be used to synchronize CPU operations with GPU tasks. So when this method is called,
* the CPU waits for the GPU to complete its operation (e.g. a compute task).
*
* @async
* @abstract
* @return {Promise} A Promise that resolves when synchronization has been finished.
*/
async waitForGPU() {}
/**
* This method performs a readback operation by moving buffer data from
* a storage buffer attribute from the GPU to the CPU.
*
* @async
* @param {StorageBufferAttribute} attribute - The storage buffer attribute.
* @return {Promise<ArrayBuffer>} A promise that resolves with the buffer data when the data are ready.
*/
async getArrayBufferAsync( /* attribute */ ) {}
/**
* Checks if the given feature is supported by the backend.
*
* @async
* @abstract
* @param {String} name - The feature's name.
* @return {Promise<Boolean>} A Promise that resolves with a bool that indicates whether the feature is supported or not.
*/
async hasFeatureAsync( /*name*/ ) { }
/**
* Checks if the given feature is supported by the backend.
*
* @abstract
* @param {String} name - The feature's name.
* @return {Boolean} Whether the feature is supported or not.
*/
hasFeature( /*name*/ ) {}
/**
* Returns the maximum anisotropy texture filtering value.
*
* @abstract
* @return {Number} The maximum anisotropy texture filtering value.
*/
getMaxAnisotropy() {}
/**
* Returns the drawing buffer size.
*
* @return {Vector2} The drawing buffer size.
*/
getDrawingBufferSize() {
// Lazily create the shared scratch vector to avoid per-call allocations.
_vector2 = _vector2 || new Vector2();
return this.renderer.getDrawingBufferSize( _vector2 );
}
/**
* Defines the scissor test.
*
* @abstract
* @param {Boolean} boolean - Whether the scissor test should be enabled or not.
*/
setScissorTest( /*boolean*/ ) { }
/**
* Returns the clear color and alpha into a single
* color object.
*
* @return {Color4} The clear color.
*/
getClearColor() {
const renderer = this.renderer;
_color4 = _color4 || new Color4();
renderer.getClearColor( _color4 );
// Convert the clear color into the renderer's current working color space.
_color4.getRGB( _color4, this.renderer.currentColorSpace );
return _color4;
}
/**
* Returns the DOM element. If no DOM element exists, the backend
* creates a new one.
*
* @return {HTMLCanvasElement} The DOM element.
*/
getDomElement() {
let domElement = this.domElement;
if ( domElement === null ) {
domElement = ( this.parameters.canvas !== undefined ) ? this.parameters.canvas : createCanvasElement();
// OffscreenCanvas does not have setAttribute, see #22811
if ( 'setAttribute' in domElement ) domElement.setAttribute( 'data-engine', `three.js r${REVISION} webgpu` );
this.domElement = domElement;
}
return domElement;
}
/**
* Sets a dictionary for the given object into the
* internal data structure.
*
* @param {Object} object - The object.
* @param {Object} value - The dictionary to set.
*/
set( object, value ) {
this.data.set( object, value );
}
/**
* Returns the dictionary for the given object.
*
* @param {Object} object - The object.
* @return {Object} The object's dictionary.
*/
get( object ) {
let map = this.data.get( object );
if ( map === undefined ) {
map = {};
this.data.set( object, map );
}
return map;
}
/**
* Checks if the given object has a dictionary
* with data defined.
*
* @param {Object} object - The object.
* @return {Boolean} Whether a dictionary for the given object has been defined or not.
*/
has( object ) {
return this.data.has( object );
}
/**
* Deletes an object from the internal data structure.
*
* @param {Object} object - The object to delete.
*/
delete( object ) {
this.data.delete( object );
}
/**
* Frees internal resources.
*
* @abstract
*/
dispose() { }
}
let _id$1 = 0;
/**
* This module is internally used in context of compute shaders.
* This type of shader is not natively supported in WebGL 2 and
* thus implemented via Transform Feedback. `DualAttributeData`
* manages the related data.
*
* @private
*/
class DualAttributeData {

	/**
	 * Constructs the dual-buffer data for a storage attribute.
	 *
	 * @param {Object} attributeData - The attribute data object as created by
	 * `WebGLAttributeUtils.createAttribute()` (holds `bufferGPU`, `type`,
	 * `bufferType`, `pbo`, `byteLength`, `bytesPerElement`, `version`,
	 * `isInteger` and `id`).
	 * @param {WebGLBuffer} dualBuffer - The second GPU buffer which serves as the
	 * transform feedback write target.
	 */
	constructor( attributeData, dualBuffer ) {

		this.buffers = [ attributeData.bufferGPU, dualBuffer ];
		this.type = attributeData.type;
		this.bufferType = attributeData.bufferType;
		this.pbo = attributeData.pbo;
		this.byteLength = attributeData.byteLength;
		// Bug fix: the source object uses the key 'bytesPerElement' (see the
		// attributeData literal in createAttribute()); reading the typed-array
		// style 'BYTES_PER_ELEMENT' always yielded undefined.
		this.bytesPerElement = attributeData.bytesPerElement;
		this.version = attributeData.version;
		this.isInteger = attributeData.isInteger;
		// Index of the buffer currently used for reading/drawing (0 or 1).
		this.activeBufferIndex = 0;
		this.baseId = attributeData.id;

	}

	/**
	 * Unique id that also encodes which of the two buffers is active, so cached
	 * state keyed by id is invalidated when the buffers are swapped.
	 *
	 * @type {String}
	 */
	get id() {

		return `${ this.baseId }|${ this.activeBufferIndex }`;

	}

	/**
	 * The currently active (read/draw) GPU buffer.
	 *
	 * @type {WebGLBuffer}
	 */
	get bufferGPU() {

		return this.buffers[ this.activeBufferIndex ];

	}

	/**
	 * The inactive GPU buffer, used as the transform feedback write target.
	 *
	 * @type {WebGLBuffer}
	 */
	get transformBuffer() {

		return this.buffers[ this.activeBufferIndex ^ 1 ];

	}

	/**
	 * Swaps the read and write buffers (ping-pong) after a transform feedback pass.
	 */
	switchBuffers() {

		this.activeBufferIndex ^= 1;

	}

}
/**
* A WebGL 2 backend utility module for managing shader attributes.
*
* @private
*/
class WebGLAttributeUtils {
/**
* Constructs a new utility object.
*
* @param {WebGLBackend} backend - The WebGL 2 backend.
*/
constructor( backend ) {
/**
* A reference to the WebGL 2 backend.
*
* @type {WebGLBackend}
*/
this.backend = backend;
}
/**
* Creates the GPU buffer for the given buffer attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
* @param {GLenum } bufferType - A flag that indicates the buffer type and thus binding point target.
*/
createAttribute( attribute, bufferType ) {
const backend = this.backend;
const { gl } = backend;
const array = attribute.array;
const usage = attribute.usage || gl.STATIC_DRAW;
// Interleaved attributes share one GPU buffer that is keyed by the
// underlying InterleavedBuffer, not by the attribute itself.
const bufferAttribute = attribute.isInterleavedBufferAttribute ? attribute.data : attribute;
const bufferData = backend.get( bufferAttribute );
let bufferGPU = bufferData.bufferGPU;
if ( bufferGPU === undefined ) {
bufferGPU = this._createBuffer( gl, bufferType, array, usage );
bufferData.bufferGPU = bufferGPU;
bufferData.bufferType = bufferType;
bufferData.version = bufferAttribute.version;
}
//attribute.onUploadCallback();
// Map the typed-array flavor of the attribute to the matching WebGL
// component type enum.
let type;
if ( array instanceof Float32Array ) {
type = gl.FLOAT;
} else if ( array instanceof Uint16Array ) {
if ( attribute.isFloat16BufferAttribute ) {
type = gl.HALF_FLOAT;
} else {
type = gl.UNSIGNED_SHORT;
}
} else if ( array instanceof Int16Array ) {
type = gl.SHORT;
} else if ( array instanceof Uint32Array ) {
type = gl.UNSIGNED_INT;
} else if ( array instanceof Int32Array ) {
type = gl.INT;
} else if ( array instanceof Int8Array ) {
type = gl.BYTE;
} else if ( array instanceof Uint8Array ) {
type = gl.UNSIGNED_BYTE;
} else if ( array instanceof Uint8ClampedArray ) {
type = gl.UNSIGNED_BYTE;
} else {
throw new Error( 'THREE.WebGLBackend: Unsupported buffer data format: ' + array );
}
let attributeData = {
bufferGPU,
bufferType,
type,
byteLength: array.byteLength,
bytesPerElement: array.BYTES_PER_ELEMENT,
version: attribute.version,
pbo: attribute.pbo,
isInteger: type === gl.INT || type === gl.UNSIGNED_INT || attribute.gpuType === IntType,
id: _id$1 ++
};
if ( attribute.isStorageBufferAttribute || attribute.isStorageInstancedBufferAttribute ) {
// create buffer for transform feedback use
const bufferGPUDual = this._createBuffer( gl, bufferType, array, usage );
attributeData = new DualAttributeData( attributeData, bufferGPUDual );
}
backend.set( attribute, attributeData );
}
/**
* Updates the GPU buffer of the given buffer attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
*/
updateAttribute( attribute ) {
const backend = this.backend;
const { gl } = backend;
const array = attribute.array;
const bufferAttribute = attribute.isInterleavedBufferAttribute ? attribute.data : attribute;
const bufferData = backend.get( bufferAttribute );
const bufferType = bufferData.bufferType;
const updateRanges = attribute.isInterleavedBufferAttribute ? attribute.data.updateRanges : attribute.updateRanges;
gl.bindBuffer( bufferType, bufferData.bufferGPU );
if ( updateRanges.length === 0 ) {
// Not using update ranges
gl.bufferSubData( bufferType, 0, array );
} else {
// Upload only the dirty ranges and clear them afterwards.
for ( let i = 0, l = updateRanges.length; i < l; i ++ ) {
const range = updateRanges[ i ];
gl.bufferSubData( bufferType, range.start * array.BYTES_PER_ELEMENT,
array, range.start, range.count );
}
bufferAttribute.clearUpdateRanges();
}
gl.bindBuffer( bufferType, null );
bufferData.version = bufferAttribute.version;
}
/**
* Destroys the GPU buffer of the given buffer attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
*/
destroyAttribute( attribute ) {
const backend = this.backend;
const { gl } = backend;
if ( attribute.isInterleavedBufferAttribute ) {
backend.delete( attribute.data );
}
const attributeData = backend.get( attribute );
gl.deleteBuffer( attributeData.bufferGPU );
backend.delete( attribute );
}
/**
* This method performs a readback operation by moving buffer data from
* a storage buffer attribute from the GPU to the CPU.
*
* @async
* @param {StorageBufferAttribute} attribute - The storage buffer attribute.
* @return {Promise<ArrayBuffer>} A promise that resolves with the buffer data when the data are ready.
*/
async getArrayBufferAsync( attribute ) {
const backend = this.backend;
const { gl } = backend;
const bufferAttribute = attribute.isInterleavedBufferAttribute ? attribute.data : attribute;
const { bufferGPU } = backend.get( bufferAttribute );
const array = attribute.array;
const byteLength = array.byteLength;
// Copy the GPU data into a temporary staging buffer first; reading from
// that buffer avoids stalling on the attribute's live buffer.
gl.bindBuffer( gl.COPY_READ_BUFFER, bufferGPU );
const writeBuffer = gl.createBuffer();
gl.bindBuffer( gl.COPY_WRITE_BUFFER, writeBuffer );
gl.bufferData( gl.COPY_WRITE_BUFFER, byteLength, gl.STREAM_READ );
gl.copyBufferSubData( gl.COPY_READ_BUFFER, gl.COPY_WRITE_BUFFER, 0, 0, byteLength );
// Wait (via fence sync) until the GPU has finished the copy.
await backend.utils._clientWaitAsync();
const dstBuffer = new attribute.array.constructor( array.length );
// Ensure the buffer is bound before reading
gl.bindBuffer( gl.COPY_WRITE_BUFFER, writeBuffer );
gl.getBufferSubData( gl.COPY_WRITE_BUFFER, 0, dstBuffer );
gl.deleteBuffer( writeBuffer );
gl.bindBuffer( gl.COPY_READ_BUFFER, null );
gl.bindBuffer( gl.COPY_WRITE_BUFFER, null );
return dstBuffer.buffer;
}
/**
* Creates a WebGL buffer with the given data.
*
* @private
* @param {WebGL2RenderingContext} gl - The rendering context.
* @param {GLenum } bufferType - A flag that indicates the buffer type and thus binding point target.
* @param {TypedArray} array - The array of the buffer attribute.
* @param {GLenum} usage - The usage.
* @return {WebGLBuffer} The WebGL buffer.
*/
_createBuffer( gl, bufferType, array, usage ) {
const bufferGPU = gl.createBuffer();
gl.bindBuffer( bufferType, bufferGPU );
gl.bufferData( bufferType, array, usage );
gl.bindBuffer( bufferType, null );
return bufferGPU;
}
}
let equationToGL, factorToGL;
/**
* A WebGL 2 backend utility module for managing the WebGL state.
*
* The major goal of this module is to reduce the number of state changes
* by caching the WebGL state with a series of variables. In this way, the
* renderer only executes state change commands when necessary which
* improves the overall performance.
*
* @private
*/
class WebGLState {
/**
* Constructs a new utility object.
*
* @param {WebGLBackend} backend - The WebGL 2 backend.
*/
constructor( backend ) {
/**
* A reference to the WebGL 2 backend.
*
* @type {WebGLBackend}
*/
this.backend = backend;
/**
* A reference to the rendering context.
*
* @type {WebGL2RenderingContext}
*/
this.gl = this.backend.gl;
// Below properties are intended to cache
// the WebGL state and are not explicitly
// documented for convenience reasons.
// 'null' means "unknown", which forces the first state change to be applied.
this.enabled = {};
this.currentFlipSided = null;
this.currentCullFace = null;
this.currentProgram = null;
this.currentBlendingEnabled = false;
this.currentBlending = null;
// NOTE(review): setBlending() also tracks 'currentBlendEquation' and
// 'currentBlendEquationAlpha' which are not initialized here, so they
// start as 'undefined' (treated like "unknown").
this.currentBlendSrc = null;
this.currentBlendDst = null;
this.currentBlendSrcAlpha = null;
this.currentBlendDstAlpha = null;
this.currentPremultipledAlpha = null;
this.currentPolygonOffsetFactor = null;
this.currentPolygonOffsetUnits = null;
this.currentColorMask = null;
this.currentDepthFunc = null;
this.currentDepthMask = null;
this.currentStencilFunc = null;
this.currentStencilRef = null;
this.currentStencilFuncMask = null;
this.currentStencilFail = null;
this.currentStencilZFail = null;
this.currentStencilZPass = null;
this.currentStencilMask = null;
this.currentLineWidth = null;
this.currentClippingPlanes = 0;
this.currentBoundFramebuffers = {};
this.currentDrawbuffers = new WeakMap();
this.maxTextures = this.gl.getParameter( this.gl.MAX_TEXTURE_IMAGE_UNITS );
this.currentTextureSlot = null;
this.currentBoundTextures = {};
this.currentBoundBufferBases = {};
this._init();
}
/**
* Inits the state of the utility.
*
* @private
*/
_init() {
const gl = this.gl;
// Store only WebGL constants here.
// Build the module-level lookup tables mapping three.js blending constants
// to the context's GL enums (used by setBlending()).
equationToGL = {
[ AddEquation ]: gl.FUNC_ADD,
[ SubtractEquation ]: gl.FUNC_SUBTRACT,
[ ReverseSubtractEquation ]: gl.FUNC_REVERSE_SUBTRACT
};
factorToGL = {
[ ZeroFactor ]: gl.ZERO,
[ OneFactor ]: gl.ONE,
[ SrcColorFactor ]: gl.SRC_COLOR,
[ SrcAlphaFactor ]: gl.SRC_ALPHA,
[ SrcAlphaSaturateFactor ]: gl.SRC_ALPHA_SATURATE,
[ DstColorFactor ]: gl.DST_COLOR,
[ DstAlphaFactor ]: gl.DST_ALPHA,
[ OneMinusSrcColorFactor ]: gl.ONE_MINUS_SRC_COLOR,
[ OneMinusSrcAlphaFactor ]: gl.ONE_MINUS_SRC_ALPHA,
[ OneMinusDstColorFactor ]: gl.ONE_MINUS_DST_COLOR,
[ OneMinusDstAlphaFactor ]: gl.ONE_MINUS_DST_ALPHA
};
// Seed the scissor/viewport caches with the context's actual current state.
const scissorParam = gl.getParameter( gl.SCISSOR_BOX );
const viewportParam = gl.getParameter( gl.VIEWPORT );
this.currentScissor = new Vector4().fromArray( scissorParam );
this.currentViewport = new Vector4().fromArray( viewportParam );
// Scratch vector to avoid allocations in scissor/viewport comparisons.
this._tempVec4 = new Vector4();
}
/**
* Enables the given WebGL capability.
*
* This method caches the capability state so
* `gl.enable()` is only called when necessary.
*
* @param {GLenum} id - The capability to enable.
*/
enable( id ) {
const { enabled } = this;
if ( enabled[ id ] !== true ) {
this.gl.enable( id );
enabled[ id ] = true;
}
}
/**
* Disables the given WebGL capability.
*
* This method caches the capability state so
* `gl.disable()` is only called when necessary.
*
* @param {GLenum} id - The capability to enable.
*/
disable( id ) {
const { enabled } = this;
if ( enabled[ id ] !== false ) {
this.gl.disable( id );
enabled[ id ] = false;
}
}
/**
* Specifies whether polygons are front- or back-facing
* by setting the winding orientation.
*
* This method caches the state so `gl.frontFace()` is only
* called when necessary.
*
* @param {Boolean} flipSided - Whether triangles flipped their sides or not.
*/
setFlipSided( flipSided ) {
if ( this.currentFlipSided !== flipSided ) {
const { gl } = this;
if ( flipSided ) {
gl.frontFace( gl.CW );
} else {
gl.frontFace( gl.CCW );
}
this.currentFlipSided = flipSided;
}
}
/**
* Specifies whether or not front- and/or back-facing
* polygons can be culled.
*
* This method caches the state so `gl.cullFace()` is only
* called when necessary.
*
* @param {Number} cullFace - Defines which polygons are candidates for culling.
*/
setCullFace( cullFace ) {
const { gl } = this;
if ( cullFace !== CullFaceNone ) {
this.enable( gl.CULL_FACE );
if ( cullFace !== this.currentCullFace ) {
if ( cullFace === CullFaceBack ) {
gl.cullFace( gl.BACK );
} else if ( cullFace === CullFaceFront ) {
gl.cullFace( gl.FRONT );
} else {
gl.cullFace( gl.FRONT_AND_BACK );
}
}
} else {
this.disable( gl.CULL_FACE );
}
this.currentCullFace = cullFace;
}
/**
* Specifies the width of line primitives.
*
* This method caches the state so `gl.lineWidth()` is only
* called when necessary.
*
* @param {Number} width - The line width.
*/
setLineWidth( width ) {
const { currentLineWidth, gl } = this;
if ( width !== currentLineWidth ) {
gl.lineWidth( width );
this.currentLineWidth = width;
}
}
/**
* Defines the blending.
*
* This method caches the state so `gl.blendEquation()`, `gl.blendEquationSeparate()`,
* `gl.blendFunc()` and `gl.blendFuncSeparate()` are only called when necessary.
*
* @param {Number} blending - The blending type.
* @param {Number} blendEquation - The blending equation.
* @param {Number} blendSrc - Only relevant for custom blending. The RGB source blending factor.
* @param {Number} blendDst - Only relevant for custom blending. The RGB destination blending factor.
* @param {Number} blendEquationAlpha - Only relevant for custom blending. The blending equation for alpha.
* @param {Number} blendSrcAlpha - Only relevant for custom blending. The alpha source blending factor.
* @param {Number} blendDstAlpha - Only relevant for custom blending. The alpha destination blending factor.
* @param {Boolean} premultipliedAlpha - Whether premultiplied alpha is enabled or not.
*/
setBlending( blending, blendEquation, blendSrc, blendDst, blendEquationAlpha, blendSrcAlpha, blendDstAlpha, premultipliedAlpha ) {
const { gl } = this;
// NoBlending: make sure gl.BLEND is off and bail out early.
if ( blending === NoBlending ) {
if ( this.currentBlendingEnabled === true ) {
this.disable( gl.BLEND );
this.currentBlendingEnabled = false;
}
return;
}
if ( this.currentBlendingEnabled === false ) {
this.enable( gl.BLEND );
this.currentBlendingEnabled = true;
}
// Preset blending modes (Normal/Additive/Subtractive/Multiply): translate
// the mode (plus premultiplied alpha flag) into fixed blend functions.
if ( blending !== CustomBlending ) {
if ( blending !== this.currentBlending || premultipliedAlpha !== this.currentPremultipledAlpha ) {
// Presets always use the ADD equation; reset it if a custom equation was active.
if ( this.currentBlendEquation !== AddEquation || this.currentBlendEquationAlpha !== AddEquation ) {
gl.blendEquation( gl.FUNC_ADD );
this.currentBlendEquation = AddEquation;
this.currentBlendEquationAlpha = AddEquation;
}
if ( premultipliedAlpha ) {
switch ( blending ) {
case NormalBlending:
gl.blendFuncSeparate( gl.ONE, gl.ONE_MINUS_SRC_ALPHA, gl.ONE, gl.ONE_MINUS_SRC_ALPHA );
break;
case AdditiveBlending:
gl.blendFunc( gl.ONE, gl.ONE );
break;
case SubtractiveBlending:
gl.blendFuncSeparate( gl.ZERO, gl.ONE_MINUS_SRC_COLOR, gl.ZERO, gl.ONE );
break;
case MultiplyBlending:
gl.blendFuncSeparate( gl.ZERO, gl.SRC_COLOR, gl.ZERO, gl.SRC_ALPHA );
break;
default:
console.error( 'THREE.WebGLState: Invalid blending: ', blending );
break;
}
} else {
switch ( blending ) {
case NormalBlending:
gl.blendFuncSeparate( gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA, gl.ONE, gl.ONE_MINUS_SRC_ALPHA );
break;
case AdditiveBlending:
gl.blendFunc( gl.SRC_ALPHA, gl.ONE );
break;
case SubtractiveBlending:
gl.blendFuncSeparate( gl.ZERO, gl.ONE_MINUS_SRC_COLOR, gl.ZERO, gl.ONE );
break;
case MultiplyBlending:
gl.blendFunc( gl.ZERO, gl.SRC_COLOR );
break;
default:
console.error( 'THREE.WebGLState: Invalid blending: ', blending );
break;
}
}
// Invalidate the custom factor cache since presets bypass it.
this.currentBlendSrc = null;
this.currentBlendDst = null;
this.currentBlendSrcAlpha = null;
this.currentBlendDstAlpha = null;
this.currentBlending = blending;
this.currentPremultipledAlpha = premultipliedAlpha;
}
return;
}
// custom blending
// Alpha equation/factors fall back to the RGB ones when not provided.
blendEquationAlpha = blendEquationAlpha || blendEquation;
blendSrcAlpha = blendSrcAlpha || blendSrc;
blendDstAlpha = blendDstAlpha || blendDst;
if ( blendEquation !== this.currentBlendEquation || blendEquationAlpha !== this.currentBlendEquationAlpha ) {
gl.blendEquationSeparate( equationToGL[ blendEquation ], equationToGL[ blendEquationAlpha ] );
this.currentBlendEquation = blendEquation;
this.currentBlendEquationAlpha = blendEquationAlpha;
}
if ( blendSrc !== this.currentBlendSrc || blendDst !== this.currentBlendDst || blendSrcAlpha !== this.currentBlendSrcAlpha || blendDstAlpha !== this.currentBlendDstAlpha ) {
gl.blendFuncSeparate( factorToGL[ blendSrc ], factorToGL[ blendDst ], factorToGL[ blendSrcAlpha ], factorToGL[ blendDstAlpha ] );
this.currentBlendSrc = blendSrc;
this.currentBlendDst = blendDst;
this.currentBlendSrcAlpha = blendSrcAlpha;
this.currentBlendDstAlpha = blendDstAlpha;
}
this.currentBlending = blending;
this.currentPremultipledAlpha = false;
}
/**
* Specifies whether colors can be written when rendering
* into a framebuffer or not.
*
* This method caches the state so `gl.colorMask()` is only
* called when necessary.
*
* @param {Boolean} colorMask - The color mask.
*/
setColorMask( colorMask ) {
if ( this.currentColorMask !== colorMask ) {
this.gl.colorMask( colorMask, colorMask, colorMask, colorMask );
this.currentColorMask = colorMask;
}
}
/**
* Specifies whether the depth test is enabled or not.
*
* @param {Boolean} depthTest - Whether the depth test is enabled or not.
*/
setDepthTest( depthTest ) {
const { gl } = this;
if ( depthTest ) {
this.enable( gl.DEPTH_TEST );
} else {
this.disable( gl.DEPTH_TEST );
}
}
/**
* Specifies whether depth values can be written when rendering
* into a framebuffer or not.
*
* This method caches the state so `gl.depthMask()` is only
* called when necessary.
*
* @param {Boolean} depthMask - The depth mask.
*/
setDepthMask( depthMask ) {
if ( this.currentDepthMask !== depthMask ) {
this.gl.depthMask( depthMask );
this.currentDepthMask = depthMask;
}
}
/**
* Specifies the depth compare function.
*
* This method caches the state so `gl.depthFunc()` is only
* called when necessary.
*
* @param {Number} depthFunc - The depth compare function.
*/
setDepthFunc( depthFunc ) {
if ( this.currentDepthFunc !== depthFunc ) {
const { gl } = this;
switch ( depthFunc ) {
case NeverDepth:
gl.depthFunc( gl.NEVER );
break;
case AlwaysDepth:
gl.depthFunc( gl.ALWAYS );
break;
case LessDepth:
gl.depthFunc( gl.LESS );
break;
case LessEqualDepth:
gl.depthFunc( gl.LEQUAL );
break;
case EqualDepth:
gl.depthFunc( gl.EQUAL );
break;
case GreaterEqualDepth:
gl.depthFunc( gl.GEQUAL );
break;
case GreaterDepth:
gl.depthFunc( gl.GREATER );
break;
case NotEqualDepth:
gl.depthFunc( gl.NOTEQUAL );
break;
default:
gl.depthFunc( gl.LEQUAL );
}
this.currentDepthFunc = depthFunc;
}
}
/**
* Specifies the scissor box.
*
* This method caches the state so `gl.scissor()` is only
* called when necessary.
*
* @param {Number} x - The x-coordinate of the lower left corner of the scissor box.
* @param {Number} y - The y-coordinate of the lower left corner of the scissor box.
* @param {Number} width - The width of the scissor box.
* @param {Number} height - The height of the scissor box.
*
*/
scissor( x, y, width, height ) {
const scissor = this._tempVec4.set( x, y, width, height );
if ( this.currentScissor.equals( scissor ) === false ) {
const { gl } = this;
gl.scissor( scissor.x, scissor.y, scissor.z, scissor.w );
this.currentScissor.copy( scissor );
}
}
/**
* Specifies the viewport.
*
* @param {Number} x - The x-coordinate of the lower left corner of the viewport.
* @param {Number} y - The y-coordinate of the lower left corner of the viewport.
* @param {Number} width - The width of the viewport.
* @param {Number} height - The height of the viewport.
*
*/
viewport( x, y, width, height ) {
const viewport = this._tempVec4.set( x, y, width, height );
if ( this.currentViewport.equals( viewport ) === false ) {
const { gl } = this;
gl.viewport( viewport.x, viewport.y, viewport.z, viewport.w );
this.currentViewport.copy( viewport );
}
}
/**
* Defines the scissor test.
*
* NOTE(review): unlike `setDepthTest()`/`setStencilTest()`, this calls
* `gl.enable()`/`gl.disable()` directly instead of the cached
* `this.enable()`/`this.disable()` helpers, so the capability cache is
* bypassed here — confirm whether this is intentional before unifying.
*
* @param {Boolean} boolean - Whether the scissor test should be enabled or not.
*/
setScissorTest( boolean ) {
const gl = this.gl;
if ( boolean ) {
gl.enable( gl.SCISSOR_TEST );
} else {
gl.disable( gl.SCISSOR_TEST );
}
}
/**
* Specifies whether the stencil test is enabled or not.
*
* @param {Boolean} stencilTest - Whether the stencil test is enabled or not.
*/
setStencilTest( stencilTest ) {
const { gl } = this;
if ( stencilTest ) {
this.enable( gl.STENCIL_TEST );
} else {
this.disable( gl.STENCIL_TEST );
}
}
/**
* Specifies whether stencil values can be written when rendering
* into a framebuffer or not.
*
* This method caches the state so `gl.stencilMask()` is only
* called when necessary.
*
* @param {Boolean} stencilMask - The stencil mask.
*/
setStencilMask( stencilMask ) {
if ( this.currentStencilMask !== stencilMask ) {
this.gl.stencilMask( stencilMask );
this.currentStencilMask = stencilMask;
}
}
/**
* Specifies whether the stencil test functions.
*
* This method caches the state so `gl.stencilFunc()` is only
* called when necessary.
*
* @param {Number} stencilFunc - The stencil compare function.
* @param {Number} stencilRef - The reference value for the stencil test.
* @param {Number} stencilMask - A bit-wise mask that is used to AND the reference value and the stored stencil value when the test is done.
*/
setStencilFunc( stencilFunc, stencilRef, stencilMask ) {
if ( this.currentStencilFunc !== stencilFunc ||
this.currentStencilRef !== stencilRef ||
this.currentStencilFuncMask !== stencilMask ) {
this.gl.stencilFunc( stencilFunc, stencilRef, stencilMask );
this.currentStencilFunc = stencilFunc;
this.currentStencilRef = stencilRef;
this.currentStencilFuncMask = stencilMask;
}
}
/**
* Specifies whether the stencil test operation.
*
* This method caches the state so `gl.stencilOp()` is only
* called when necessary.
*
* @param {Number} stencilFail - The function to use when the stencil test fails.
* @param {Number} stencilZFail - The function to use when the stencil test passes, but the depth test fail.
* @param {Number} stencilZPass - The function to use when both the stencil test and the depth test pass,
* or when the stencil test passes and there is no depth buffer or depth testing is disabled.
*/
setStencilOp( stencilFail, stencilZFail, stencilZPass ) {
if ( this.currentStencilFail !== stencilFail ||
this.currentStencilZFail !== stencilZFail ||
this.currentStencilZPass !== stencilZPass ) {
this.gl.stencilOp( stencilFail, stencilZFail, stencilZPass );
this.currentStencilFail = stencilFail;
this.currentStencilZFail = stencilZFail;
this.currentStencilZPass = stencilZPass;
}
}
/**
* Configures the WebGL state for the given material: face culling and
* winding, blending, depth/color/stencil state, polygon offset,
* alpha-to-coverage and hardware clip distances.
*
* @param {Material} material - The material to configure the state for.
* @param {Boolean} frontFaceCW - Whether the front faces are clockwise (flips the winding below).
* @param {Number} hardwareClippingPlanes - The number of hardware clipping planes.
*/
setMaterial( material, frontFaceCW, hardwareClippingPlanes ) {
const { gl } = this;
material.side === DoubleSide
? this.disable( gl.CULL_FACE )
: this.enable( gl.CULL_FACE );
// back-sided materials flip the winding; clockwise front faces flip it again
let flipSided = ( material.side === BackSide );
if ( frontFaceCW ) flipSided = ! flipSided;
this.setFlipSided( flipSided );
// opaque NormalBlending materials render with blending disabled entirely
( material.blending === NormalBlending && material.transparent === false )
? this.setBlending( NoBlending )
: this.setBlending( material.blending, material.blendEquation, material.blendSrc, material.blendDst, material.blendEquationAlpha, material.blendSrcAlpha, material.blendDstAlpha, material.premultipliedAlpha );
this.setDepthFunc( material.depthFunc );
this.setDepthTest( material.depthTest );
this.setDepthMask( material.depthWrite );
this.setColorMask( material.colorWrite );
const stencilWrite = material.stencilWrite;
this.setStencilTest( stencilWrite );
if ( stencilWrite ) {
this.setStencilMask( material.stencilWriteMask );
this.setStencilFunc( material.stencilFunc, material.stencilRef, material.stencilFuncMask );
this.setStencilOp( material.stencilFail, material.stencilZFail, material.stencilZPass );
}
this.setPolygonOffset( material.polygonOffset, material.polygonOffsetFactor, material.polygonOffsetUnits );
// alpha-to-coverage only has an effect with multisampling
material.alphaToCoverage === true && this.backend.renderer.samples > 1
? this.enable( gl.SAMPLE_ALPHA_TO_COVERAGE )
: this.disable( gl.SAMPLE_ALPHA_TO_COVERAGE );
if ( hardwareClippingPlanes > 0 ) {
// NOTE(review): `currentClippingPlanes` is compared here but never assigned
// in this method — confirm it is updated elsewhere, otherwise this guard
// never caches (the per-capability enable/disable cache still applies).
if ( this.currentClippingPlanes !== hardwareClippingPlanes ) {
const CLIP_DISTANCE0_WEBGL = 0x3000;
// enable the first `hardwareClippingPlanes` clip distances, disable the rest (max 8)
for ( let i = 0; i < 8; i ++ ) {
if ( i < hardwareClippingPlanes ) {
this.enable( CLIP_DISTANCE0_WEBGL + i );
} else {
this.disable( CLIP_DISTANCE0_WEBGL + i );
}
}
}
}
}
/**
* Specifies the polygon offset.
*
* This method caches the state so `gl.polygonOffset()` is only
* called when necessary.
*
* @param {Boolean} polygonOffset - Whether polygon offset is enabled or not.
* @param {Number} factor - The scale factor for the variable depth offset for each polygon.
* @param {Number} units - The multiplier by which an implementation-specific value is multiplied with to create a constant depth offset.
*/
setPolygonOffset( polygonOffset, factor, units ) {
const { gl } = this;
if ( polygonOffset ) {
this.enable( gl.POLYGON_OFFSET_FILL );
if ( this.currentPolygonOffsetFactor !== factor || this.currentPolygonOffsetUnits !== units ) {
gl.polygonOffset( factor, units );
this.currentPolygonOffsetFactor = factor;
this.currentPolygonOffsetUnits = units;
}
} else {
this.disable( gl.POLYGON_OFFSET_FILL );
}
}
/**
* Defines the usage of the given WebGL program.
*
* This method caches the state so `gl.useProgram()` is only
* called when necessary.
*
* @param {WebGLProgram} program - The WebGL program to use.
* @return {Boolean} Whether a program change has been executed or not.
*/
useProgram( program ) {
if ( this.currentProgram !== program ) {
this.gl.useProgram( program );
this.currentProgram = program;
return true;
}
return false;
}
// framebuffer
/**
* Binds the given framebuffer.
*
* This method caches the state so `gl.bindFramebuffer()` is only
* called when necessary.
*
* @param {Number} target - The binding point (target).
* @param {WebGLFramebuffer} framebuffer - The WebGL framebuffer to bind.
* @return {Boolean} Whether a bind has been executed or not.
*/
bindFramebuffer( target, framebuffer ) {
const { gl, currentBoundFramebuffers } = this;
if ( currentBoundFramebuffers[ target ] !== framebuffer ) {
gl.bindFramebuffer( target, framebuffer );
currentBoundFramebuffers[ target ] = framebuffer;
// gl.DRAW_FRAMEBUFFER is equivalent to gl.FRAMEBUFFER
if ( target === gl.DRAW_FRAMEBUFFER ) {
currentBoundFramebuffers[ gl.FRAMEBUFFER ] = framebuffer;
}
if ( target === gl.FRAMEBUFFER ) {
currentBoundFramebuffers[ gl.DRAW_FRAMEBUFFER ] = framebuffer;
}
return true;
}
return false;
}
/**
* Defines draw buffers to which fragment colors are written into.
* Configures the MRT setup of custom framebuffers.
*
* This method caches the state so `gl.drawBuffers()` is only
* called when necessary.
*
* NOTE(review): in the `textures === null` branch below, the check runs
* against the fresh local `drawBuffers` array (not a cached one), so
* `gl.drawBuffers( [ gl.BACK ] )` is issued on every call for the default
* framebuffer — confirm whether this is intentional.
*
* @param {RenderContext} renderContext - The render context.
* @param {WebGLFramebuffer} framebuffer - The WebGL framebuffer.
*/
drawBuffers( renderContext, framebuffer ) {
const { gl } = this;
let drawBuffers = [];
let needsUpdate = false;
if ( renderContext.textures !== null ) {
// use (or lazily create) the cached attachment list for this framebuffer
drawBuffers = this.currentDrawbuffers.get( framebuffer );
if ( drawBuffers === undefined ) {
drawBuffers = [];
this.currentDrawbuffers.set( framebuffer, drawBuffers );
}
const textures = renderContext.textures;
// rebuild when the attachment count changed or the list was previously [ gl.BACK ]
if ( drawBuffers.length !== textures.length || drawBuffers[ 0 ] !== gl.COLOR_ATTACHMENT0 ) {
for ( let i = 0, il = textures.length; i < il; i ++ ) {
drawBuffers[ i ] = gl.COLOR_ATTACHMENT0 + i;
}
drawBuffers.length = textures.length;
needsUpdate = true;
}
} else {
if ( drawBuffers[ 0 ] !== gl.BACK ) {
drawBuffers[ 0 ] = gl.BACK;
needsUpdate = true;
}
}
if ( needsUpdate ) {
gl.drawBuffers( drawBuffers );
}
}
// texture
/**
* Makes the given texture unit active.
*
* This method caches the state so `gl.activeTexture()` is only
* called when necessary.
*
* @param {Number} webglSlot - The texture unit to make active.
*/
activeTexture( webglSlot ) {
const { gl, currentTextureSlot, maxTextures } = this;
if ( webglSlot === undefined ) webglSlot = gl.TEXTURE0 + maxTextures - 1;
if ( currentTextureSlot !== webglSlot ) {
gl.activeTexture( webglSlot );
this.currentTextureSlot = webglSlot;
}
}
/**
* Binds the given WebGL texture to a target.
*
* This method caches the state so `gl.bindTexture()` is only
* called when necessary.
*
* @param {Number} webglType - The binding point (target).
* @param {WebGLTexture} webglTexture - The WebGL texture to bind.
* @param {Number} webglSlot - The texture.
*/
bindTexture( webglType, webglTexture, webglSlot ) {
const { gl, currentTextureSlot, currentBoundTextures, maxTextures } = this;
if ( webglSlot === undefined ) {
if ( currentTextureSlot === null ) {
webglSlot = gl.TEXTURE0 + maxTextures - 1;
} else {
webglSlot = currentTextureSlot;
}
}
let boundTexture = currentBoundTextures[ webglSlot ];
if ( boundTexture === undefined ) {
boundTexture = { type: undefined, texture: undefined };
currentBoundTextures[ webglSlot ] = boundTexture;
}
if ( boundTexture.type !== webglType || boundTexture.texture !== webglTexture ) {
if ( currentTextureSlot !== webglSlot ) {
gl.activeTexture( webglSlot );
this.currentTextureSlot = webglSlot;
}
gl.bindTexture( webglType, webglTexture );
boundTexture.type = webglType;
boundTexture.texture = webglTexture;
}
}
/**
* Binds a given WebGL buffer to a given binding point (target) at a given index.
*
* This method caches the state so `gl.bindBufferBase()` is only
* called when necessary.
*
* @param {Number} target - The target for the bind operation.
* @param {Number} index - The index of the target.
* @param {WebGLBuffer} buffer - The WebGL buffer.
* @return {Boolean} Whether a bind has been executed or not.
*/
bindBufferBase( target, index, buffer ) {
const { gl } = this;
const key = `${target}-${index}`;
if ( this.currentBoundBufferBases[ key ] !== buffer ) {
gl.bindBufferBase( target, index, buffer );
this.currentBoundBufferBases[ key ] = buffer;
return true;
}
return false;
}
/**
* Unbinds the current bound texture.
*
* This method caches the state so `gl.bindTexture()` is only
* called when necessary.
*/
unbindTexture() {
const { gl, currentTextureSlot, currentBoundTextures } = this;
const boundTexture = currentBoundTextures[ currentTextureSlot ];
if ( boundTexture !== undefined && boundTexture.type !== undefined ) {
gl.bindTexture( boundTexture.type, null );
boundTexture.type = undefined;
boundTexture.texture = undefined;
}
}
}
/**
* A WebGL 2 backend utility module with common helpers.
*
* @private
*/
class WebGLUtils {
/**
* Constructs a new utility object.
*
* @param {WebGLBackend} backend - The WebGL 2 backend.
*/
constructor( backend ) {
/**
* A reference to the WebGL 2 backend.
*
* @type {WebGLBackend}
*/
this.backend = backend;
/**
* A reference to the rendering context.
*
* @type {WebGL2RenderingContext}
*/
this.gl = this.backend.gl;
/**
* A reference to a backend module holding extension-related
* utility functions.
*
* @type {WebGLExtensions}
*/
this.extensions = backend.extensions;
}
/**
* Converts the given three.js constant into a WebGL constant.
* The method currently supports the conversion of texture formats
* and types (including compressed formats, which may require extensions).
*
* @param {Number} p - The three.js constant.
* @param {String} [colorSpace=NoColorSpace] - The color space.
* @return {?Number} The corresponding WebGL constant, or `null` when a required extension is unavailable.
*/
convert( p, colorSpace = NoColorSpace ) {
const { gl, extensions } = this;
let extension;
// texture types
if ( p === UnsignedByteType ) return gl.UNSIGNED_BYTE;
if ( p === UnsignedShort4444Type ) return gl.UNSIGNED_SHORT_4_4_4_4;
if ( p === UnsignedShort5551Type ) return gl.UNSIGNED_SHORT_5_5_5_1;
if ( p === UnsignedInt5999Type ) return gl.UNSIGNED_INT_5_9_9_9_REV;
if ( p === ByteType ) return gl.BYTE;
if ( p === ShortType ) return gl.SHORT;
if ( p === UnsignedShortType ) return gl.UNSIGNED_SHORT;
if ( p === IntType ) return gl.INT;
if ( p === UnsignedIntType ) return gl.UNSIGNED_INT;
if ( p === FloatType ) return gl.FLOAT;
if ( p === HalfFloatType ) {
return gl.HALF_FLOAT;
}
// uncompressed texture formats
if ( p === AlphaFormat ) return gl.ALPHA;
if ( p === RGBFormat ) return gl.RGB;
if ( p === RGBAFormat ) return gl.RGBA;
if ( p === LuminanceFormat ) return gl.LUMINANCE;
if ( p === LuminanceAlphaFormat ) return gl.LUMINANCE_ALPHA;
if ( p === DepthFormat ) return gl.DEPTH_COMPONENT;
if ( p === DepthStencilFormat ) return gl.DEPTH_STENCIL;
// WebGL2 formats.
if ( p === RedFormat ) return gl.RED;
if ( p === RedIntegerFormat ) return gl.RED_INTEGER;
if ( p === RGFormat ) return gl.RG;
if ( p === RGIntegerFormat ) return gl.RG_INTEGER;
if ( p === RGBAIntegerFormat ) return gl.RGBA_INTEGER;
// S3TC
if ( p === RGB_S3TC_DXT1_Format || p === RGBA_S3TC_DXT1_Format || p === RGBA_S3TC_DXT3_Format || p === RGBA_S3TC_DXT5_Format ) {
if ( colorSpace === SRGBColorSpace ) {
extension = extensions.get( 'WEBGL_compressed_texture_s3tc_srgb' );
if ( extension !== null ) {
if ( p === RGB_S3TC_DXT1_Format ) return extension.COMPRESSED_SRGB_S3TC_DXT1_EXT;
if ( p === RGBA_S3TC_DXT1_Format ) return extension.COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT;
if ( p === RGBA_S3TC_DXT3_Format ) return extension.COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT;
if ( p === RGBA_S3TC_DXT5_Format ) return extension.COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
} else {
return null;
}
} else {
extension = extensions.get( 'WEBGL_compressed_texture_s3tc' );
if ( extension !== null ) {
if ( p === RGB_S3TC_DXT1_Format ) return extension.COMPRESSED_RGB_S3TC_DXT1_EXT;
if ( p === RGBA_S3TC_DXT1_Format ) return extension.COMPRESSED_RGBA_S3TC_DXT1_EXT;
if ( p === RGBA_S3TC_DXT3_Format ) return extension.COMPRESSED_RGBA_S3TC_DXT3_EXT;
if ( p === RGBA_S3TC_DXT5_Format ) return extension.COMPRESSED_RGBA_S3TC_DXT5_EXT;
} else {
return null;
}
}
}
// PVRTC
if ( p === RGB_PVRTC_4BPPV1_Format || p === RGB_PVRTC_2BPPV1_Format || p === RGBA_PVRTC_4BPPV1_Format || p === RGBA_PVRTC_2BPPV1_Format ) {
extension = extensions.get( 'WEBGL_compressed_texture_pvrtc' );
if ( extension !== null ) {
if ( p === RGB_PVRTC_4BPPV1_Format ) return extension.COMPRESSED_RGB_PVRTC_4BPPV1_IMG;
if ( p === RGB_PVRTC_2BPPV1_Format ) return extension.COMPRESSED_RGB_PVRTC_2BPPV1_IMG;
if ( p === RGBA_PVRTC_4BPPV1_Format ) return extension.COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;
if ( p === RGBA_PVRTC_2BPPV1_Format ) return extension.COMPRESSED_RGBA_PVRTC_2BPPV1_IMG;
} else {
return null;
}
}
// ETC
if ( p === RGB_ETC1_Format || p === RGB_ETC2_Format || p === RGBA_ETC2_EAC_Format ) {
extension = extensions.get( 'WEBGL_compressed_texture_etc' );
if ( extension !== null ) {
if ( p === RGB_ETC1_Format || p === RGB_ETC2_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ETC2 : extension.COMPRESSED_RGB8_ETC2;
if ( p === RGBA_ETC2_EAC_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ETC2_EAC : extension.COMPRESSED_RGBA8_ETC2_EAC;
} else {
return null;
}
}
// ASTC
if ( p === RGBA_ASTC_4x4_Format || p === RGBA_ASTC_5x4_Format || p === RGBA_ASTC_5x5_Format ||
p === RGBA_ASTC_6x5_Format || p === RGBA_ASTC_6x6_Format || p === RGBA_ASTC_8x5_Format ||
p === RGBA_ASTC_8x6_Format || p === RGBA_ASTC_8x8_Format || p === RGBA_ASTC_10x5_Format ||
p === RGBA_ASTC_10x6_Format || p === RGBA_ASTC_10x8_Format || p === RGBA_ASTC_10x10_Format ||
p === RGBA_ASTC_12x10_Format || p === RGBA_ASTC_12x12_Format ) {
extension = extensions.get( 'WEBGL_compressed_texture_astc' );
if ( extension !== null ) {
if ( p === RGBA_ASTC_4x4_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR : extension.COMPRESSED_RGBA_ASTC_4x4_KHR;
if ( p === RGBA_ASTC_5x4_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR : extension.COMPRESSED_RGBA_ASTC_5x4_KHR;
if ( p === RGBA_ASTC_5x5_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR : extension.COMPRESSED_RGBA_ASTC_5x5_KHR;
if ( p === RGBA_ASTC_6x5_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR : extension.COMPRESSED_RGBA_ASTC_6x5_KHR;
if ( p === RGBA_ASTC_6x6_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR : extension.COMPRESSED_RGBA_ASTC_6x6_KHR;
if ( p === RGBA_ASTC_8x5_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR : extension.COMPRESSED_RGBA_ASTC_8x5_KHR;
if ( p === RGBA_ASTC_8x6_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR : extension.COMPRESSED_RGBA_ASTC_8x6_KHR;
if ( p === RGBA_ASTC_8x8_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR : extension.COMPRESSED_RGBA_ASTC_8x8_KHR;
if ( p === RGBA_ASTC_10x5_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR : extension.COMPRESSED_RGBA_ASTC_10x5_KHR;
if ( p === RGBA_ASTC_10x6_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR : extension.COMPRESSED_RGBA_ASTC_10x6_KHR;
if ( p === RGBA_ASTC_10x8_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR : extension.COMPRESSED_RGBA_ASTC_10x8_KHR;
if ( p === RGBA_ASTC_10x10_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR : extension.COMPRESSED_RGBA_ASTC_10x10_KHR;
if ( p === RGBA_ASTC_12x10_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR : extension.COMPRESSED_RGBA_ASTC_12x10_KHR;
if ( p === RGBA_ASTC_12x12_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR : extension.COMPRESSED_RGBA_ASTC_12x12_KHR;
} else {
return null;
}
}
// BPTC
if ( p === RGBA_BPTC_Format ) {
extension = extensions.get( 'EXT_texture_compression_bptc' );
if ( extension !== null ) {
if ( p === RGBA_BPTC_Format ) return ( colorSpace === SRGBColorSpace ) ? extension.COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT : extension.COMPRESSED_RGBA_BPTC_UNORM_EXT;
} else {
return null;
}
}
// RGTC
if ( p === RED_RGTC1_Format || p === SIGNED_RED_RGTC1_Format || p === RED_GREEN_RGTC2_Format || p === SIGNED_RED_GREEN_RGTC2_Format ) {
extension = extensions.get( 'EXT_texture_compression_rgtc' );
if ( extension !== null ) {
// FIX: this branch previously tested `p === RGBA_BPTC_Format` (a copy-paste
// remnant of the BPTC block above, unreachable here), so RED_RGTC1_Format
// fell through to the string fallback below instead of resolving here.
if ( p === RED_RGTC1_Format ) return extension.COMPRESSED_RED_RGTC1_EXT;
if ( p === SIGNED_RED_RGTC1_Format ) return extension.COMPRESSED_SIGNED_RED_RGTC1_EXT;
if ( p === RED_GREEN_RGTC2_Format ) return extension.COMPRESSED_RED_GREEN_RGTC2_EXT;
if ( p === SIGNED_RED_GREEN_RGTC2_Format ) return extension.COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT;
} else {
return null;
}
}
//
if ( p === UnsignedInt248Type ) {
return gl.UNSIGNED_INT_24_8;
}
// if "p" can't be resolved, assume the user defines a WebGL constant as a string (fallback/workaround for packed RGB formats)
return ( gl[ p ] !== undefined ) ? gl[ p ] : null;
}
/**
* This method can be used to synchronize the CPU with the GPU by waiting until
* ongoing GPU commands have been completed.
*
* @private
* @return {Promise} A promise that resolves when all ongoing GPU commands have been completed.
*/
_clientWaitAsync() {
const { gl } = this;
const sync = gl.fenceSync( gl.SYNC_GPU_COMMANDS_COMPLETE, 0 );
gl.flush();
return new Promise( ( resolve, reject ) => {
function test() {
const res = gl.clientWaitSync( sync, gl.SYNC_FLUSH_COMMANDS_BIT, 0 );
if ( res === gl.WAIT_FAILED ) {
gl.deleteSync( sync );
reject();
return;
}
if ( res === gl.TIMEOUT_EXPIRED ) {
// not signaled yet; poll again on the next animation frame
requestAnimationFrame( test );
return;
}
gl.deleteSync( sync );
resolve();
}
test();
} );
}
}
// Module-level, lazily-built lookup tables shared by all WebGLTextureUtils instances.
let initialized = false;
let wrappingToGL;
let filterToGL;
let compareToGL;
/**
* A WebGL 2 backend utility module for managing textures.
*
* @private
*/
class WebGLTextureUtils {
/**
* Constructs a new utility object.
*
* @param {WebGLBackend} backend - The WebGL 2 backend.
*/
constructor( backend ) {
/**
* A reference to the WebGL 2 backend.
*
* @type {WebGLBackend}
*/
this.backend = backend;
/**
* A reference to the rendering context.
*
* @type {WebGL2RenderingContext}
*/
this.gl = backend.gl;
/**
* A reference to a backend module holding extension-related
* utility functions.
*
* @type {WebGLExtensions}
*/
this.extensions = backend.extensions;
/**
* A dictionary for managing default textures. The key
* is the binding point (target), the value the WebGL texture object.
*
* @type {Object<GLenum,WebGLTexture>}
*/
this.defaultTextures = {};
// the GL constant lookup tables are module-level and only built once,
// by the first instance that gets constructed
if ( initialized === false ) {
this._init();
initialized = true;
}
}
/**
* Inits the state of the utility.
*
* @private
*/
_init() {
const gl = this.gl;
// Store only WebGL constants here.
wrappingToGL = {
[ RepeatWrapping ]: gl.REPEAT,
[ ClampToEdgeWrapping ]: gl.CLAMP_TO_EDGE,
[ MirroredRepeatWrapping ]: gl.MIRRORED_REPEAT
};
filterToGL = {
[ NearestFilter ]: gl.NEAREST,
[ NearestMipmapNearestFilter ]: gl.NEAREST_MIPMAP_NEAREST,
[ NearestMipmapLinearFilter ]: gl.NEAREST_MIPMAP_LINEAR,
[ LinearFilter ]: gl.LINEAR,
[ LinearMipmapNearestFilter ]: gl.LINEAR_MIPMAP_NEAREST,
[ LinearMipmapLinearFilter ]: gl.LINEAR_MIPMAP_LINEAR
};
compareToGL = {
[ NeverCompare ]: gl.NEVER,
[ AlwaysCompare ]: gl.ALWAYS,
[ LessCompare ]: gl.LESS,
[ LessEqualCompare ]: gl.LEQUAL,
[ EqualCompare ]: gl.EQUAL,
[ GreaterEqualCompare ]: gl.GEQUAL,
[ GreaterCompare ]: gl.GREATER,
[ NotEqualCompare ]: gl.NOTEQUAL
};
}
/**
* Returns the native texture type for the given texture.
*
* @param {Texture} texture - The texture.
* @return {GLenum} The native texture type.
*/
getGLTextureType( texture ) {
const { gl } = this;
let glTextureType;
if ( texture.isCubeTexture === true ) {
glTextureType = gl.TEXTURE_CUBE_MAP;
} else if ( texture.isDataArrayTexture === true || texture.isCompressedArrayTexture === true ) {
glTextureType = gl.TEXTURE_2D_ARRAY;
} else if ( texture.isData3DTexture === true ) { // TODO: isCompressed3DTexture, wait for #26642
glTextureType = gl.TEXTURE_3D;
} else {
glTextureType = gl.TEXTURE_2D;
}
return glTextureType;
}
/**
* Returns the internal format for the given format/type combination.
*
* @param {String?} internalFormatName - The internal format name. When `null`, the internal format is derived from the subsequent parameters.
* @param {GLenum} glFormat - The WebGL format.
* @param {GLenum} glType - The WebGL type.
* @param {String} colorSpace - The texture's color space.
* @param {Boolean} [forceLinearTransfer=false] - Whether to force a linear transfer or not.
* @return {GLenum} The internal format.
*/
getInternalFormat( internalFormatName, glFormat, glType, colorSpace, forceLinearTransfer = false ) {
const { gl, extensions } = this;
// an explicitly named internal format takes precedence over the derivation below
if ( internalFormatName !== null ) {
if ( gl[ internalFormatName ] !== undefined ) return gl[ internalFormatName ];
console.warn( 'THREE.WebGLRenderer: Attempt to use non-existing WebGL internal format \'' + internalFormatName + '\'' );
}
let internalFormat = glFormat;
if ( glFormat === gl.RED ) {
if ( glType === gl.FLOAT ) internalFormat = gl.R32F;
if ( glType === gl.HALF_FLOAT ) internalFormat = gl.R16F;
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = gl.R8;
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.R16;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.R32UI;
if ( glType === gl.BYTE ) internalFormat = gl.R8I;
if ( glType === gl.SHORT ) internalFormat = gl.R16I;
if ( glType === gl.INT ) internalFormat = gl.R32I;
}
if ( glFormat === gl.RED_INTEGER ) {
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = gl.R8UI;
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.R16UI;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.R32UI;
if ( glType === gl.BYTE ) internalFormat = gl.R8I;
if ( glType === gl.SHORT ) internalFormat = gl.R16I;
if ( glType === gl.INT ) internalFormat = gl.R32I;
}
if ( glFormat === gl.RG ) {
if ( glType === gl.FLOAT ) internalFormat = gl.RG32F;
if ( glType === gl.HALF_FLOAT ) internalFormat = gl.RG16F;
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = gl.RG8;
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.RG16;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.RG32UI;
if ( glType === gl.BYTE ) internalFormat = gl.RG8I;
if ( glType === gl.SHORT ) internalFormat = gl.RG16I;
if ( glType === gl.INT ) internalFormat = gl.RG32I;
}
if ( glFormat === gl.RG_INTEGER ) {
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = gl.RG8UI;
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.RG16UI;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.RG32UI;
if ( glType === gl.BYTE ) internalFormat = gl.RG8I;
if ( glType === gl.SHORT ) internalFormat = gl.RG16I;
if ( glType === gl.INT ) internalFormat = gl.RG32I;
}
if ( glFormat === gl.RGB ) {
if ( glType === gl.FLOAT ) internalFormat = gl.RGB32F;
if ( glType === gl.HALF_FLOAT ) internalFormat = gl.RGB16F;
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = gl.RGB8;
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.RGB16;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.RGB32UI;
if ( glType === gl.BYTE ) internalFormat = gl.RGB8I;
if ( glType === gl.SHORT ) internalFormat = gl.RGB16I;
if ( glType === gl.INT ) internalFormat = gl.RGB32I;
// the UNSIGNED_BYTE case is re-evaluated here to pick sRGB when applicable
// (this later assignment wins over the plain RGB8 above)
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = ( colorSpace === SRGBColorSpace && forceLinearTransfer === false ) ? gl.SRGB8 : gl.RGB8;
if ( glType === gl.UNSIGNED_SHORT_5_6_5 ) internalFormat = gl.RGB565;
if ( glType === gl.UNSIGNED_SHORT_5_5_5_1 ) internalFormat = gl.RGB5_A1;
// NOTE(review): `gl.RGB4` is not part of the WebGL2RenderingContext enum set,
// so this would assign `undefined` — confirm the intended sized format here.
if ( glType === gl.UNSIGNED_SHORT_4_4_4_4 ) internalFormat = gl.RGB4;
if ( glType === gl.UNSIGNED_INT_5_9_9_9_REV ) internalFormat = gl.RGB9_E5;
}
if ( glFormat === gl.RGB_INTEGER ) {
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = gl.RGB8UI;
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.RGB16UI;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.RGB32UI;
if ( glType === gl.BYTE ) internalFormat = gl.RGB8I;
if ( glType === gl.SHORT ) internalFormat = gl.RGB16I;
if ( glType === gl.INT ) internalFormat = gl.RGB32I;
}
if ( glFormat === gl.RGBA ) {
if ( glType === gl.FLOAT ) internalFormat = gl.RGBA32F;
if ( glType === gl.HALF_FLOAT ) internalFormat = gl.RGBA16F;
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = gl.RGBA8;
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.RGBA16;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.RGBA32UI;
if ( glType === gl.BYTE ) internalFormat = gl.RGBA8I;
if ( glType === gl.SHORT ) internalFormat = gl.RGBA16I;
if ( glType === gl.INT ) internalFormat = gl.RGBA32I;
// as above: the later UNSIGNED_BYTE assignment selects sRGB when applicable
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = ( colorSpace === SRGBColorSpace && forceLinearTransfer === false ) ? gl.SRGB8_ALPHA8 : gl.RGBA8;
if ( glType === gl.UNSIGNED_SHORT_4_4_4_4 ) internalFormat = gl.RGBA4;
if ( glType === gl.UNSIGNED_SHORT_5_5_5_1 ) internalFormat = gl.RGB5_A1;
}
if ( glFormat === gl.RGBA_INTEGER ) {
if ( glType === gl.UNSIGNED_BYTE ) internalFormat = gl.RGBA8UI;
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.RGBA16UI;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.RGBA32UI;
if ( glType === gl.BYTE ) internalFormat = gl.RGBA8I;
if ( glType === gl.SHORT ) internalFormat = gl.RGBA16I;
if ( glType === gl.INT ) internalFormat = gl.RGBA32I;
}
if ( glFormat === gl.DEPTH_COMPONENT ) {
if ( glType === gl.UNSIGNED_SHORT ) internalFormat = gl.DEPTH_COMPONENT16;
if ( glType === gl.UNSIGNED_INT ) internalFormat = gl.DEPTH_COMPONENT24;
if ( glType === gl.FLOAT ) internalFormat = gl.DEPTH_COMPONENT32F;
}
if ( glFormat === gl.DEPTH_STENCIL ) {
if ( glType === gl.UNSIGNED_INT_24_8 ) internalFormat = gl.DEPTH24_STENCIL8;
}
// float color buffers require EXT_color_buffer_float to be render targets;
// requesting the extension here enables it for subsequent framebuffer use
if ( internalFormat === gl.R16F || internalFormat === gl.R32F ||
internalFormat === gl.RG16F || internalFormat === gl.RG32F ||
internalFormat === gl.RGBA16F || internalFormat === gl.RGBA32F ) {
extensions.get( 'EXT_color_buffer_float' );
}
return internalFormat;
}
/**
* Sets the texture parameters for the given texture: pixel-store flags,
* wrapping, filtering, depth-compare mode and (optionally) anisotropy.
*
* @param {GLenum} textureType - The texture type (binding target).
* @param {Texture} texture - The texture.
*/
setTextureParameters( textureType, texture ) {
const { gl, extensions, backend } = this;
gl.pixelStorei( gl.UNPACK_FLIP_Y_WEBGL, texture.flipY );
gl.pixelStorei( gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, texture.premultiplyAlpha );
gl.pixelStorei( gl.UNPACK_ALIGNMENT, texture.unpackAlignment );
gl.pixelStorei( gl.UNPACK_COLORSPACE_CONVERSION_WEBGL, gl.NONE );
gl.texParameteri( textureType, gl.TEXTURE_WRAP_S, wrappingToGL[ texture.wrapS ] );
gl.texParameteri( textureType, gl.TEXTURE_WRAP_T, wrappingToGL[ texture.wrapT ] );
// the R (depth) wrap mode only exists for 3D and array textures
if ( textureType === gl.TEXTURE_3D || textureType === gl.TEXTURE_2D_ARRAY ) {
gl.texParameteri( textureType, gl.TEXTURE_WRAP_R, wrappingToGL[ texture.wrapR ] );
}
gl.texParameteri( textureType, gl.TEXTURE_MAG_FILTER, filterToGL[ texture.magFilter ] );
const hasMipmaps = texture.mipmaps !== undefined && texture.mipmaps.length > 0;
// follow WebGPU backend mapping for texture filtering
const minFilter = texture.minFilter === LinearFilter && hasMipmaps ? LinearMipmapLinearFilter : texture.minFilter;
gl.texParameteri( textureType, gl.TEXTURE_MIN_FILTER, filterToGL[ minFilter ] );
// depth textures with a compare function become shadow samplers
if ( texture.compareFunction ) {
gl.texParameteri( textureType, gl.TEXTURE_COMPARE_MODE, gl.COMPARE_REF_TO_TEXTURE );
gl.texParameteri( textureType, gl.TEXTURE_COMPARE_FUNC, compareToGL[ texture.compareFunction ] );
}
if ( extensions.has( 'EXT_texture_filter_anisotropic' ) === true ) {
// the early returns below skip anisotropy for filter/type combinations
// where it has no effect or is unsupported; nothing follows this block,
// so returning here ends the method
if ( texture.magFilter === NearestFilter ) return;
if ( texture.minFilter !== NearestMipmapLinearFilter && texture.minFilter !== LinearMipmapLinearFilter ) return;
if ( texture.type === FloatType && extensions.has( 'OES_texture_float_linear' ) === false ) return; // verify extension for WebGL 1 and WebGL 2
if ( texture.anisotropy > 1 ) {
const extension = extensions.get( 'EXT_texture_filter_anisotropic' );
gl.texParameterf( textureType, extension.TEXTURE_MAX_ANISOTROPY_EXT, Math.min( texture.anisotropy, backend.getMaxAnisotropy() ) );
}
}
}
/**
* Creates a default texture for the given texture that can be used
* as a placeholder until the actual texture is ready for usage.
*
* @param {Texture} texture - The texture to create a default texture for.
*/
createDefaultTexture( texture ) {
const { gl, backend, defaultTextures } = this;
const glTextureType = this.getGLTextureType( texture );
let textureGPU = defaultTextures[ glTextureType ];
if ( textureGPU === undefined ) {
textureGPU = gl.createTexture();
backend.state.bindTexture( glTextureType, textureGPU );
gl.texParameteri( glTextureType, gl.TEXTURE_MIN_FILTER, gl.NEAREST );
gl.texParameteri( glTextureType, gl.TEXTURE_MAG_FILTER, gl.NEAREST );
// gl.texImage2D( glTextureType, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE, data );
defaultTextures[ glTextureType ] = textureGPU;
}
backend.set( texture, {
textureGPU,
glTextureType,
isDefault: true
} );
}
/**
* Defines a texture on the GPU for the given texture object.
*
* @param {Texture} texture - The texture.
* @param {Object} [options={}] - Optional configuration parameter.
* @return {undefined}
*/
createTexture( texture, options ) {
const { gl, backend } = this;
const { levels, width, height, depth } = options;
const glFormat = backend.utils.convert( texture.format, texture.colorSpace );
const glType = backend.utils.convert( texture.type );
const glInternalFormat = this.getInternalFormat( texture.internalFormat, glFormat, glType, texture.colorSpace, texture.isVideoTexture );
const textureGPU = gl.createTexture();
const glTextureType = this.getGLTextureType( texture );
backend.state.bindTexture( glTextureType, textureGPU );
this.setTextureParameters( glTextureType, texture );
if ( texture.isDataArrayTexture || texture.isCompressedArrayTexture ) {
gl.texStorage3D( gl.TEXTURE_2D_ARRAY, levels, glInternalFormat, width, height, depth );
} else if ( texture.isData3DTexture ) {
gl.texStorage3D( gl.TEXTURE_3D, levels, glInternalFormat, width, height, depth );
} else if ( ! texture.isVideoTexture ) {
gl.texStorage2D( glTextureType, levels, glInternalFormat, width, height );
}
backend.set( texture, {
textureGPU,
glTextureType,
glFormat,
glType,
glInternalFormat
} );
}
/**
* Uploads texture buffer data to the GPU memory.
*
* The buffer is bound as a PIXEL_UNPACK_BUFFER so texSubImage2D sources
* its texel data directly from GPU memory (offset 0) instead of a
* client-side array.
*
* @param {WebGLBuffer} buffer - The buffer data.
* @param {Texture} texture - The destination texture.
*/
copyBufferToTexture( buffer, texture ) {
const { gl, backend } = this;
const { textureGPU, glTextureType, glFormat, glType } = backend.get( texture );
const { width, height } = texture.source.data;
gl.bindBuffer( gl.PIXEL_UNPACK_BUFFER, buffer );
backend.state.bindTexture( glTextureType, textureGPU );
// Buffer contents are raw texels; disable any CPU-side unpack transforms.
gl.pixelStorei( gl.UNPACK_FLIP_Y_WEBGL, false );
gl.pixelStorei( gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, false );
// Last argument is a byte offset into the bound unpack buffer, not an array.
gl.texSubImage2D( glTextureType, 0, 0, 0, width, height, glFormat, glType, 0 );
gl.bindBuffer( gl.PIXEL_UNPACK_BUFFER, null );
backend.state.unbindTexture();
// debug
// const framebuffer = gl.createFramebuffer();
// gl.bindFramebuffer( gl.FRAMEBUFFER, framebuffer );
// gl.framebufferTexture2D( gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, glTextureType, textureGPU, 0 );
// const readout = new Float32Array( width * height * 4 );
// const altFormat = gl.getParameter( gl.IMPLEMENTATION_COLOR_READ_FORMAT );
// const altType = gl.getParameter( gl.IMPLEMENTATION_COLOR_READ_TYPE );
// gl.readPixels( 0, 0, width, height, altFormat, altType, readout );
// gl.bindFramebuffer( gl.FRAMEBUFFER, null );
// console.log( readout );
}
/**
* Uploads the updated texture data to the GPU.
*
* Dispatches on the texture kind (compressed, cube, array, 3D, video,
* plain) and issues the matching texSubImage/compressedTexSubImage call
* into the storage allocated by `createTexture()`.
*
* @param {Texture} texture - The texture.
* @param {Object} [options={}] - Optional configuration parameter (width, height, image/images).
*/
updateTexture( texture, options ) {
const { gl } = this;
const { width, height } = options;
const { textureGPU, glTextureType, glFormat, glType, glInternalFormat } = this.backend.get( texture );
// Render-target textures are written by the GPU; nothing to upload.
if ( texture.isRenderTargetTexture || ( textureGPU === undefined /* unsupported texture format */ ) )
return;
// Normalizes a source to what texSubImage2D accepts: a typed array for
// data textures, the element itself for DOM/bitmap sources, raw data otherwise.
const getImage = ( source ) => {
if ( source.isDataTexture ) {
return source.image.data;
} else if ( ( typeof HTMLImageElement !== 'undefined' && source instanceof HTMLImageElement ) ||
( typeof HTMLCanvasElement !== 'undefined' && source instanceof HTMLCanvasElement ) ||
( typeof ImageBitmap !== 'undefined' && source instanceof ImageBitmap ) ||
source instanceof OffscreenCanvas ) {
return source;
}
return source.data;
};
this.backend.state.bindTexture( glTextureType, textureGPU );
this.setTextureParameters( glTextureType, texture );
if ( texture.isCompressedTexture ) {
// Upload every mip level explicitly; compressed data cannot be generated on the GPU.
const mipmaps = texture.mipmaps;
const image = options.image;
for ( let i = 0; i < mipmaps.length; i ++ ) {
const mipmap = mipmaps[ i ];
if ( texture.isCompressedArrayTexture ) {
// glFormat === null signals an unsupported compressed format.
if ( texture.format !== gl.RGBA ) {
if ( glFormat !== null ) {
gl.compressedTexSubImage3D( gl.TEXTURE_2D_ARRAY, i, 0, 0, 0, mipmap.width, mipmap.height, image.depth, glFormat, mipmap.data );
} else {
console.warn( 'THREE.WebGLRenderer: Attempt to load unsupported compressed texture format in .uploadTexture()' );
}
} else {
gl.texSubImage3D( gl.TEXTURE_2D_ARRAY, i, 0, 0, 0, mipmap.width, mipmap.height, image.depth, glFormat, glType, mipmap.data );
}
} else {
if ( glFormat !== null ) {
gl.compressedTexSubImage2D( gl.TEXTURE_2D, i, 0, 0, mipmap.width, mipmap.height, glFormat, mipmap.data );
} else {
console.warn( 'Unsupported compressed texture format' );
}
}
}
} else if ( texture.isCubeTexture ) {
// One upload per face; face targets are consecutive GL enums.
const images = options.images;
for ( let i = 0; i < 6; i ++ ) {
const image = getImage( images[ i ] );
gl.texSubImage2D( gl.TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, 0, 0, width, height, glFormat, glType, image );
}
} else if ( texture.isDataArrayTexture ) {
const image = options.image;
gl.texSubImage3D( gl.TEXTURE_2D_ARRAY, 0, 0, 0, 0, image.width, image.height, image.depth, glFormat, glType, image.data );
} else if ( texture.isData3DTexture ) {
const image = options.image;
gl.texSubImage3D( gl.TEXTURE_3D, 0, 0, 0, 0, image.width, image.height, image.depth, glFormat, glType, image.data );
} else if ( texture.isVideoTexture ) {
// Video textures have no immutable storage; redefine the image each frame.
texture.update();
gl.texImage2D( glTextureType, 0, glInternalFormat, glFormat, glType, options.image );
} else {
const image = getImage( options.image );
gl.texSubImage2D( glTextureType, 0, 0, 0, width, height, glFormat, glType, image );
}
}
/**
* Generates mipmaps for the given texture.
*
* @param {Texture} texture - The texture.
*/
generateMipmaps( texture ) {
const { gl, backend } = this;
const { textureGPU, glTextureType } = backend.get( texture );
backend.state.bindTexture( glTextureType, textureGPU );
gl.generateMipmap( glTextureType );
}
/**
* Deallocates the render buffers of the given render target.
*
* @param {RenderTarget} renderTarget - The render target.
*/
deallocateRenderBuffers( renderTarget ) {
const { gl, backend } = this;
// remove framebuffer reference
if ( renderTarget ) {
const renderContextData = backend.get( renderTarget );
renderContextData.renderBufferStorageSetup = undefined;
if ( renderContextData.framebuffers ) {
for ( const cacheKey in renderContextData.framebuffers ) {
gl.deleteFramebuffer( renderContextData.framebuffers[ cacheKey ] );
}
delete renderContextData.framebuffers;
}
if ( renderContextData.depthRenderbuffer ) {
gl.deleteRenderbuffer( renderContextData.depthRenderbuffer );
delete renderContextData.depthRenderbuffer;
}
if ( renderContextData.stencilRenderbuffer ) {
gl.deleteRenderbuffer( renderContextData.stencilRenderbuffer );
delete renderContextData.stencilRenderbuffer;
}
if ( renderContextData.msaaFrameBuffer ) {
gl.deleteFramebuffer( renderContextData.msaaFrameBuffer );
delete renderContextData.msaaFrameBuffer;
}
if ( renderContextData.msaaRenderbuffers ) {
for ( let i = 0; i < renderContextData.msaaRenderbuffers.length; i ++ ) {
gl.deleteRenderbuffer( renderContextData.msaaRenderbuffers[ i ] );
}
delete renderContextData.msaaRenderbuffers;
}
}
}
/**
* Destroys the GPU data for the given texture object.
*
* @param {Texture} texture - The texture.
*/
destroyTexture( texture ) {
const { gl, backend } = this;
const { textureGPU, renderTarget } = backend.get( texture );
this.deallocateRenderBuffers( renderTarget );
gl.deleteTexture( textureGPU );
backend.delete( texture );
}
/**
* Copies data of the given source texture to the given destination texture.
*
* @param {Texture} srcTexture - The source texture.
* @param {Texture} dstTexture - The destination texture.
* @param {Vector4?} [srcRegion=null] - The region of the source texture to copy.
* @param {(Vector2|Vector3)?} [dstPosition=null] - The destination position of the copy.
* @param {Number} [level=0] - The mip level to copy.
*/
copyTextureToTexture( srcTexture, dstTexture, srcRegion = null, dstPosition = null, level = 0 ) {
const { gl, backend } = this;
const { state } = this.backend;
const { textureGPU: dstTextureGPU, glTextureType, glType, glFormat } = backend.get( dstTexture );
let width, height, minX, minY;
let dstX, dstY;
if ( srcRegion !== null ) {
width = srcRegion.max.x - srcRegion.min.x;
height = srcRegion.max.y - srcRegion.min.y;
minX = srcRegion.min.x;
minY = srcRegion.min.y;
} else {
width = srcTexture.image.width;
height = srcTexture.image.height;
minX = 0;
minY = 0;
}
if ( dstPosition !== null ) {
dstX = dstPosition.x;
dstY = dstPosition.y;
} else {
dstX = 0;
dstY = 0;
}
state.bindTexture( glTextureType, dstTextureGPU );
// As another texture upload may have changed pixelStorei
// parameters, make sure they are correct for the dstTexture
gl.pixelStorei( gl.UNPACK_ALIGNMENT, dstTexture.unpackAlignment );
gl.pixelStorei( gl.UNPACK_FLIP_Y_WEBGL, dstTexture.flipY );
gl.pixelStorei( gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, dstTexture.premultiplyAlpha );
gl.pixelStorei( gl.UNPACK_ALIGNMENT, dstTexture.unpackAlignment );
const currentUnpackRowLen = gl.getParameter( gl.UNPACK_ROW_LENGTH );
const currentUnpackImageHeight = gl.getParameter( gl.UNPACK_IMAGE_HEIGHT );
const currentUnpackSkipPixels = gl.getParameter( gl.UNPACK_SKIP_PIXELS );
const currentUnpackSkipRows = gl.getParameter( gl.UNPACK_SKIP_ROWS );
const currentUnpackSkipImages = gl.getParameter( gl.UNPACK_SKIP_IMAGES );
const image = srcTexture.isCompressedTexture ? srcTexture.mipmaps[ level ] : srcTexture.image;
gl.pixelStorei( gl.UNPACK_ROW_LENGTH, image.width );
gl.pixelStorei( gl.UNPACK_IMAGE_HEIGHT, image.height );
gl.pixelStorei( gl.UNPACK_SKIP_PIXELS, minX );
gl.pixelStorei( gl.UNPACK_SKIP_ROWS, minY );
if ( srcTexture.isRenderTargetTexture || srcTexture.isDepthTexture ) {
const srcTextureData = backend.get( srcTexture );
const dstTextureData = backend.get( dstTexture );
const srcRenderContextData = backend.get( srcTextureData.renderTarget );
const dstRenderContextData = backend.get( dstTextureData.renderTarget );
const srcFramebuffer = srcRenderContextData.framebuffers[ srcTextureData.cacheKey ];
const dstFramebuffer = dstRenderContextData.framebuffers[ dstTextureData.cacheKey ];
state.bindFramebuffer( gl.READ_FRAMEBUFFER, srcFramebuffer );
state.bindFramebuffer( gl.DRAW_FRAMEBUFFER, dstFramebuffer );
let mask = gl.COLOR_BUFFER_BIT;
if ( srcTexture.isDepthTexture ) mask = gl.DEPTH_BUFFER_BIT;
gl.blitFramebuffer( minX, minY, width, height, dstX, dstY, width, height, mask, gl.NEAREST );
state.bindFramebuffer( gl.READ_FRAMEBUFFER, null );
state.bindFramebuffer( gl.DRAW_FRAMEBUFFER, null );
} else {
if ( srcTexture.isDataTexture ) {
gl.texSubImage2D( gl.TEXTURE_2D, level, dstX, dstY, width, height, glFormat, glType, image.data );
} else {
if ( srcTexture.isCompressedTexture ) {
gl.compressedTexSubImage2D( gl.TEXTURE_2D, level, dstX, dstY, image.width, image.height, glFormat, image.data );
} else {
gl.texSubImage2D( gl.TEXTURE_2D, level, dstX, dstY, width, height, glFormat, glType, image );
}
}
}
gl.pixelStorei( gl.UNPACK_ROW_LENGTH, currentUnpackRowLen );
gl.pixelStorei( gl.UNPACK_IMAGE_HEIGHT, currentUnpackImageHeight );
gl.pixelStorei( gl.UNPACK_SKIP_PIXELS, currentUnpackSkipPixels );
gl.pixelStorei( gl.UNPACK_SKIP_ROWS, currentUnpackSkipRows );
gl.pixelStorei( gl.UNPACK_SKIP_IMAGES, currentUnpackSkipImages );
// Generate mipmaps only when copying level 0
if ( level === 0 && dstTexture.generateMipmaps ) gl.generateMipmap( gl.TEXTURE_2D );
state.unbindTexture();
}
/**
* Copies the current bound framebuffer to the given texture.
*
* Depth textures and multisampled render targets are copied via a
* framebuffer blit (which also resolves MSAA); plain color copies use
* copyTexSubImage2D. Y coordinates are flipped because the WebGL
* framebuffer origin is bottom-left.
*
* @param {Texture} texture - The destination texture.
* @param {RenderContext} renderContext - The render context.
* @param {Vector4} rectangle - A four dimensional vector defining the origin and dimension of the copy.
*/
copyFramebufferToTexture( texture, renderContext, rectangle ) {
const { gl } = this;
const { state } = this.backend;
const { textureGPU } = this.backend.get( texture );
// rectangle packs (x, y, width, height) into a Vector4.
const { x, y, z: width, w: height } = rectangle;
// Depth copies and MSAA sources cannot use copyTexSubImage2D directly; they need a blit.
const requireDrawFrameBuffer = texture.isDepthTexture === true || ( renderContext.renderTarget && renderContext.renderTarget.samples > 0 );
const srcHeight = renderContext.renderTarget ? renderContext.renderTarget.height : this.backend.getDrawingBufferSize().y;
if ( requireDrawFrameBuffer ) {
const partial = ( x !== 0 || y !== 0 );
let mask;
let attachment;
if ( texture.isDepthTexture === true ) {
mask = gl.DEPTH_BUFFER_BIT;
attachment = gl.DEPTH_ATTACHMENT;
if ( renderContext.stencil ) {
mask |= gl.STENCIL_BUFFER_BIT;
}
} else {
mask = gl.COLOR_BUFFER_BIT;
attachment = gl.COLOR_ATTACHMENT0;
}
if ( partial ) {
// Partial copy: first resolve the MSAA framebuffer into the cached
// single-sample framebuffer, then copy the sub-rectangle into the texture.
const renderTargetContextData = this.backend.get( renderContext.renderTarget );
const fb = renderTargetContextData.framebuffers[ renderContext.getCacheKey() ];
const msaaFrameBuffer = renderTargetContextData.msaaFrameBuffer;
state.bindFramebuffer( gl.DRAW_FRAMEBUFFER, fb );
state.bindFramebuffer( gl.READ_FRAMEBUFFER, msaaFrameBuffer );
// Convert top-left-origin y to the GL bottom-left origin.
const flippedY = srcHeight - y - height;
gl.blitFramebuffer( x, flippedY, x + width, flippedY + height, x, flippedY, x + width, flippedY + height, mask, gl.NEAREST );
state.bindFramebuffer( gl.READ_FRAMEBUFFER, fb );
state.bindTexture( gl.TEXTURE_2D, textureGPU );
gl.copyTexSubImage2D( gl.TEXTURE_2D, 0, 0, 0, x, flippedY, width, height );
state.unbindTexture();
} else {
// Full copy: blit straight into a temporary framebuffer that has the
// destination texture attached.
const fb = gl.createFramebuffer();
state.bindFramebuffer( gl.DRAW_FRAMEBUFFER, fb );
gl.framebufferTexture2D( gl.DRAW_FRAMEBUFFER, attachment, gl.TEXTURE_2D, textureGPU, 0 );
gl.blitFramebuffer( 0, 0, width, height, 0, 0, width, height, mask, gl.NEAREST );
gl.deleteFramebuffer( fb );
}
} else {
// Simple single-sample color copy from the currently bound framebuffer.
state.bindTexture( gl.TEXTURE_2D, textureGPU );
gl.copyTexSubImage2D( gl.TEXTURE_2D, 0, 0, 0, x, srcHeight - height - y, width, height );
state.unbindTexture();
}
if ( texture.generateMipmaps ) this.generateMipmaps( texture );
// Restore the framebuffer binding expected by the render context.
this.backend._setFramebuffer( renderContext );
}
/**
* SetupS storage for internal depth/stencil buffers and bind to correct framebuffer.
*
* @param {WebGLRenderbuffer} renderbuffer - The render buffer.
* @param {RenderContext} renderContext - The render context.
* @param {Number} samples - The MSAA sample count.
* @param {Boolean} [useMultisampledRTT=false] - Whether to use WEBGL_multisampled_render_to_texture or not.
*/
setupRenderBufferStorage( renderbuffer, renderContext, samples, useMultisampledRTT = false ) {
const { gl } = this;
const renderTarget = renderContext.renderTarget;
const { depthTexture, depthBuffer, stencilBuffer, width, height } = renderTarget;
gl.bindRenderbuffer( gl.RENDERBUFFER, renderbuffer );
if ( depthBuffer && ! stencilBuffer ) {
let glInternalFormat = gl.DEPTH_COMPONENT24;
if ( useMultisampledRTT === true ) {
const multisampledRTTExt = this.extensions.get( 'WEBGL_multisampled_render_to_texture' );
multisampledRTTExt.renderbufferStorageMultisampleEXT( gl.RENDERBUFFER, renderTarget.samples, glInternalFormat, width, height );
} else if ( samples > 0 ) {
if ( depthTexture && depthTexture.isDepthTexture ) {
if ( depthTexture.type === gl.FLOAT ) {
glInternalFormat = gl.DEPTH_COMPONENT32F;
}
}
gl.renderbufferStorageMultisample( gl.RENDERBUFFER, samples, glInternalFormat, width, height );
} else {
gl.renderbufferStorage( gl.RENDERBUFFER, glInternalFormat, width, height );
}
gl.framebufferRenderbuffer( gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.RENDERBUFFER, renderbuffer );
} else if ( depthBuffer && stencilBuffer ) {
if ( samples > 0 ) {
gl.renderbufferStorageMultisample( gl.RENDERBUFFER, samples, gl.DEPTH24_STENCIL8, width, height );
} else {
gl.renderbufferStorage( gl.RENDERBUFFER, gl.DEPTH_STENCIL, width, height );
}
gl.framebufferRenderbuffer( gl.FRAMEBUFFER, gl.DEPTH_STENCIL_ATTACHMENT, gl.RENDERBUFFER, renderbuffer );
}
}
/**
* Returns texture data as a typed array.
*
* @async
* @param {Texture} texture - The texture to copy.
* @param {Number} x - The x coordinate of the copy origin.
* @param {Number} y - The y coordinate of the copy origin.
* @param {Number} width - The width of the copy.
* @param {Number} height - The height of the copy.
* @param {Number} faceIndex - The face index.
* @return {Promise<TypedArray>} A Promise that resolves with a typed array when the copy operation has finished.
*/
async copyTextureToBuffer( texture, x, y, width, height, faceIndex ) {
const { backend, gl } = this;
const { textureGPU, glFormat, glType } = this.backend.get( texture );
const fb = gl.createFramebuffer();
gl.bindFramebuffer( gl.READ_FRAMEBUFFER, fb );
const target = texture.isCubeTexture ? gl.TEXTURE_CUBE_MAP_POSITIVE_X + faceIndex : gl.TEXTURE_2D;
gl.framebufferTexture2D( gl.READ_FRAMEBUFFER, gl.COLOR_ATTACHMENT0, target, textureGPU, 0 );
const typedArrayType = this._getTypedArrayType( glType );
const bytesPerTexel = this._getBytesPerTexel( glType, glFormat );
const elementCount = width * height;
const byteLength = elementCount * bytesPerTexel;
const buffer = gl.createBuffer();
gl.bindBuffer( gl.PIXEL_PACK_BUFFER, buffer );
gl.bufferData( gl.PIXEL_PACK_BUFFER, byteLength, gl.STREAM_READ );
gl.readPixels( x, y, width, height, glFormat, glType, 0 );
gl.bindBuffer( gl.PIXEL_PACK_BUFFER, null );
await backend.utils._clientWaitAsync();
const dstBuffer = new typedArrayType( byteLength / typedArrayType.BYTES_PER_ELEMENT );
gl.bindBuffer( gl.PIXEL_PACK_BUFFER, buffer );
gl.getBufferSubData( gl.PIXEL_PACK_BUFFER, 0, dstBuffer );
gl.bindBuffer( gl.PIXEL_PACK_BUFFER, null );
gl.deleteFramebuffer( fb );
return dstBuffer;
}
/**
* Returns the corresponding typed array type for the given WebGL data type.
*
* @private
* @param {GLenum} glType - The WebGL data type.
* @return {TypedArray.constructor} The typed array type.
*/
_getTypedArrayType( glType ) {
const { gl } = this;
if ( glType === gl.UNSIGNED_BYTE ) return Uint8Array;
if ( glType === gl.UNSIGNED_SHORT_4_4_4_4 ) return Uint16Array;
if ( glType === gl.UNSIGNED_SHORT_5_5_5_1 ) return Uint16Array;
if ( glType === gl.UNSIGNED_SHORT_5_6_5 ) return Uint16Array;
if ( glType === gl.UNSIGNED_SHORT ) return Uint16Array;
if ( glType === gl.UNSIGNED_INT ) return Uint32Array;
if ( glType === gl.HALF_FLOAT ) return Uint16Array;
if ( glType === gl.FLOAT ) return Float32Array;
throw new Error( `Unsupported WebGL type: ${glType}` );
}
/**
* Returns the bytes-per-texel value for the given WebGL data type and texture format.
*
* @private
* @param {GLenum} glType - The WebGL data type.
* @param {GLenum} glFormat - The WebGL texture format.
* @return {Number} The bytes-per-texel.
*/
_getBytesPerTexel( glType, glFormat ) {
const { gl } = this;
let bytesPerComponent = 0;
if ( glType === gl.UNSIGNED_BYTE ) bytesPerComponent = 1;
if ( glType === gl.UNSIGNED_SHORT_4_4_4_4 ||
glType === gl.UNSIGNED_SHORT_5_5_5_1 ||
glType === gl.UNSIGNED_SHORT_5_6_5 ||
glType === gl.UNSIGNED_SHORT ||
glType === gl.HALF_FLOAT ) bytesPerComponent = 2;
if ( glType === gl.UNSIGNED_INT ||
glType === gl.FLOAT ) bytesPerComponent = 4;
if ( glFormat === gl.RGBA ) return bytesPerComponent * 4;
if ( glFormat === gl.RGB ) return bytesPerComponent * 3;
if ( glFormat === gl.ALPHA ) return bytesPerComponent;
}
}
/**
* A WebGL 2 backend utility module for managing extensions.
*
* Extension objects are requested lazily and cached, so each extension
* is queried from the context at most once.
*
* @private
*/
class WebGLExtensions {
/**
* Constructs a new utility object.
*
* @param {WebGLBackend} backend - The WebGL 2 backend.
*/
constructor( backend ) {
/**
* A reference to the WebGL 2 backend.
*
* @type {WebGLBackend}
*/
this.backend = backend;
/**
* A reference to the rendering context.
*
* @type {WebGL2RenderingContext}
*/
this.gl = backend.gl;
/**
* A list with all the supported WebGL extensions.
*
* @type {Array<String>}
*/
this.availableExtensions = this.gl.getSupportedExtensions();
/**
* A dictionary with requested WebGL extensions.
* The key is the name of the extension, the value
* the requested extension object.
*
* @type {Object<String,Object>}
*/
this.extensions = {};
}
/**
* Returns the extension object for the given extension name.
*
* The result (including `null` for unavailable extensions) is cached.
*
* @param {String} name - The extension name.
* @return {Object} The extension object.
*/
get( name ) {
if ( this.extensions[ name ] === undefined ) {
this.extensions[ name ] = this.gl.getExtension( name );
}
return this.extensions[ name ];
}
/**
* Returns `true` if the requested extension is available.
*
* @param {String} name - The extension name.
* @return {Boolean} Whether the given extension is available or not.
*/
has( name ) {
return this.availableExtensions.indexOf( name ) !== - 1;
}
}
/**
* A WebGL 2 backend utility module for managing the device's capabilities.
*
* @private
*/
class WebGLCapabilities {
/**
* Constructs a new utility object.
*
* @param {WebGLBackend} backend - The WebGL 2 backend.
*/
constructor( backend ) {
/**
* A reference to the WebGL 2 backend.
*
* @type {WebGLBackend}
*/
this.backend = backend;
/**
* This value holds the cached max anisotropy value.
*
* @type {Number?}
* @default null
*/
this.maxAnisotropy = null;
}
/**
* Returns the maximum anisotropy texture filtering value. This value
* depends on the device and is reported by the `EXT_texture_filter_anisotropic`
* WebGL extension. The result is computed once and cached.
*
* @return {Number} The maximum anisotropy texture filtering value (0 if the extension is unavailable).
*/
getMaxAnisotropy() {
if ( this.maxAnisotropy === null ) {
const backend = this.backend;
const extensions = backend.extensions;
if ( extensions.has( 'EXT_texture_filter_anisotropic' ) === true ) {
const extension = extensions.get( 'EXT_texture_filter_anisotropic' );
this.maxAnisotropy = backend.gl.getParameter( extension.MAX_TEXTURE_MAX_ANISOTROPY_EXT );
} else {
this.maxAnisotropy = 0;
}
}
return this.maxAnisotropy;
}
}
// Maps WebGL extension names to backend feature-name identifiers
// (WebGPU-style strings), so feature support can be queried with a
// single identifier regardless of the active backend.
const GLFeatureName = {
'WEBGL_multi_draw': 'WEBGL_multi_draw',
'WEBGL_compressed_texture_astc': 'texture-compression-astc',
'WEBGL_compressed_texture_etc': 'texture-compression-etc2',
'WEBGL_compressed_texture_etc1': 'texture-compression-etc1',
'WEBGL_compressed_texture_pvrtc': 'texture-compression-pvrtc',
'WEBKIT_WEBGL_compressed_texture_pvrtc': 'texture-compression-pvrtc',
'WEBGL_compressed_texture_s3tc': 'texture-compression-bc',
'EXT_texture_compression_bptc': 'texture-compression-bptc',
'EXT_disjoint_timer_query_webgl2': 'timestamp-query',
};
/**
* Issues WebGL draw calls for the currently configured geometry state
* (mode, index type, object) and reports them to the renderer's info.
*
* @private
*/
class WebGLBufferRenderer {
/**
* @param {WebGLBackend} backend - The WebGL 2 backend.
*/
constructor( backend ) {
this.gl = backend.gl;
this.extensions = backend.extensions;
this.info = backend.renderer.info;
// Per-draw state, assigned by the backend before each call.
this.mode = null;
this.index = 0;
this.type = null;
this.object = null;
}
/**
* Renders a single range, indexed when an index buffer is set.
*/
render( start, count ) {
const gl = this.gl;
if ( this.index === 0 ) {
gl.drawArrays( this.mode, start, count );
} else {
gl.drawElements( this.mode, count, this.type, start );
}
this.info.update( this.object, count, this.mode, 1 );
}
/**
* Renders a single range with instancing.
*/
renderInstances( start, count, primcount ) {
if ( primcount === 0 ) return;
const gl = this.gl;
if ( this.index === 0 ) {
gl.drawArraysInstanced( this.mode, start, count, primcount );
} else {
gl.drawElementsInstanced( this.mode, count, this.type, start, primcount );
}
this.info.update( this.object, count, this.mode, primcount );
}
/**
* Renders multiple ranges in one call via WEBGL_multi_draw,
* falling back to a loop of single draws when unavailable.
*/
renderMultiDraw( starts, counts, drawCount ) {
if ( drawCount === 0 ) return;
const extension = this.extensions.get( 'WEBGL_multi_draw' );
if ( extension === null ) {
for ( let i = 0; i < drawCount; i ++ ) {
this.render( starts[ i ], counts[ i ] );
}
return;
}
if ( this.index === 0 ) {
extension.multiDrawArraysWEBGL( this.mode, starts, 0, counts, 0, drawCount );
} else {
extension.multiDrawElementsWEBGL( this.mode, counts, 0, this.type, starts, 0, drawCount );
}
let elementCount = 0;
for ( let i = 0; i < drawCount; i ++ ) {
elementCount += counts[ i ];
}
this.info.update( this.object, elementCount, this.mode, 1 );
}
/**
* Renders multiple instanced ranges in one call via WEBGL_multi_draw,
* falling back to a loop of instanced draws when unavailable.
*/
renderMultiDrawInstances( starts, counts, drawCount, primcount ) {
if ( drawCount === 0 ) return;
const extension = this.extensions.get( 'WEBGL_multi_draw' );
if ( extension === null ) {
for ( let i = 0; i < drawCount; i ++ ) {
this.renderInstances( starts[ i ], counts[ i ], primcount[ i ] );
}
return;
}
if ( this.index === 0 ) {
extension.multiDrawArraysInstancedWEBGL( this.mode, starts, 0, counts, 0, primcount, 0, drawCount );
} else {
extension.multiDrawElementsInstancedWEBGL( this.mode, counts, 0, this.type, starts, 0, primcount, 0, drawCount );
}
let elementCount = 0;
for ( let i = 0; i < drawCount; i ++ ) {
elementCount += counts[ i ] * primcount[ i ];
}
this.info.update( this.object, elementCount, this.mode, 1 );
}
}
/**
* Abstract base class for GPU timestamp query pools. Concrete backends
* implement allocation, resolution and disposal of timer queries.
*/
class TimestampQueryPool {
/**
* @param {Number} [maxQueries=256] - Maximum number of queries the pool can hold.
*/
constructor( maxQueries = 256 ) {
// Whether timestamps are recorded at all.
this.trackTimestamp = true;
this.maxQueries = maxQueries;
// Number of queries allocated so far.
this.currentQueryIndex = 0;
// Maps render-context ids to their query offsets.
this.queryOffsets = new Map();
// Last resolved timing value, returned when resolution is unavailable.
this.lastValue = 0;
// Guards against concurrent resolves.
this.pendingResolve = false;
this.isDisposed = false;
}
/**
* Allocate queries for a specific renderContext.
*
* @abstract
*/
allocateQueriesForContext( /* renderContext */ ) {}
/**
* Resolve all timestamps and return data (or process them).
*
* @abstract
* @returns {Promise<Number>|Number} The resolved timestamp value.
*/
async resolveQueriesAsync() {}
/**
* Dispose of the query pool.
*
* @abstract
*/
dispose() {}
}
/**
* Manages a pool of WebGL timestamp queries for performance measurement.
* Handles creation, execution, and resolution of timer queries using WebGL extensions.
* @extends TimestampQueryPool
*/
class WebGLTimestampQueryPool extends TimestampQueryPool {
/**
* Creates a new WebGL timestamp query pool.
* @param {WebGLRenderingContext|WebGL2RenderingContext} gl - The WebGL context.
* @param {string} type - The type identifier for this query pool.
* @param {number} [maxQueries=2048] - Maximum number of queries this pool can hold.
*/
constructor( gl, type, maxQueries = 2048 ) {
super( maxQueries );
this.gl = gl;
this.type = type;
// Check for timer query extensions
this.ext = gl.getExtension( 'EXT_disjoint_timer_query_webgl2' ) ||
gl.getExtension( 'EXT_disjoint_timer_query' );
if ( ! this.ext ) {
console.warn( 'EXT_disjoint_timer_query not supported; timestamps will be disabled.' );
this.trackTimestamp = false;
return;
}
// Create query objects
this.queries = [];
for ( let i = 0; i < this.maxQueries; i ++ ) {
this.queries.push( gl.createQuery() );
}
this.activeQuery = null;
this.queryStates = new Map(); // Track state of each query: 'inactive', 'started', 'ended'
}
/**
* Allocates a pair of queries for a given render context.
* @param {Object} renderContext - The render context to allocate queries for.
* @returns {?number} The base offset for the allocated queries, or null if allocation failed.
*/
allocateQueriesForContext( renderContext ) {
if ( ! this.trackTimestamp ) return null;
// Check if we have enough space for a new query pair
if ( this.currentQueryIndex + 2 > this.maxQueries ) {
warnOnce( `WebGPUTimestampQueryPool [${ this.type }]: Maximum number of queries exceeded, when using trackTimestamp it is necessary to resolves the queries via renderer.resolveTimestampsAsync( THREE.TimestampQuery.${ this.type.toUpperCase() } ).` );
return null;
}
const baseOffset = this.currentQueryIndex;
this.currentQueryIndex += 2;
// Initialize query states
this.queryStates.set( baseOffset, 'inactive' );
this.queryOffsets.set( renderContext.id, baseOffset );
return baseOffset;
}
/**
* Begins a timestamp query for the specified render context.
* @param {Object} renderContext - The render context to begin timing for.
*/
beginQuery( renderContext ) {
if ( ! this.trackTimestamp || this.isDisposed ) {
return;
}
const baseOffset = this.queryOffsets.get( renderContext.id );
if ( baseOffset == null ) {
return;
}
// Don't start a new query if there's an active one
if ( this.activeQuery !== null ) {
return;
}
const query = this.queries[ baseOffset ];
if ( ! query ) {
return;
}
try {
// Only begin if query is inactive
if ( this.queryStates.get( baseOffset ) === 'inactive' ) {
this.gl.beginQuery( this.ext.TIME_ELAPSED_EXT, query );
this.activeQuery = baseOffset;
this.queryStates.set( baseOffset, 'started' );
}
} catch ( error ) {
console.error( 'Error in beginQuery:', error );
this.activeQuery = null;
this.queryStates.set( baseOffset, 'inactive' );
}
}
/**
* Ends the active timestamp query for the specified render context.
* @param {Object} renderContext - The render context to end timing for.
* @param {string} renderContext.id - Unique identifier for the render context.
*/
endQuery( renderContext ) {
if ( ! this.trackTimestamp || this.isDisposed ) {
return;
}
const baseOffset = this.queryOffsets.get( renderContext.id );
if ( baseOffset == null ) {
return;
}
// Only end if this is the active query
if ( this.activeQuery !== baseOffset ) {
return;
}
try {
this.gl.endQuery( this.ext.TIME_ELAPSED_EXT );
this.queryStates.set( baseOffset, 'ended' );
this.activeQuery = null;
} catch ( error ) {
console.error( 'Error in endQuery:', error );
// Reset state on error
this.queryStates.set( baseOffset, 'inactive' );
this.activeQuery = null;
}
}
/**
 * Asynchronously resolves all completed queries and returns the total duration.
 *
 * All per-context bookkeeping (offsets, states, active query) is reset once
 * a resolve pass succeeds, so the pool can be reused for the next frame.
 *
 * @returns {Promise<number>} The total duration in milliseconds, or the last valid value if resolution fails.
 */
async resolveQueriesAsync() {
// Re-entrancy guard: only one resolve pass may be in flight at a time.
if ( ! this.trackTimestamp || this.pendingResolve ) {
return this.lastValue;
}
this.pendingResolve = true;
try {
// Wait for all ended queries to complete
const resolvePromises = [];
for ( const [ baseOffset, state ] of this.queryStates ) {
if ( state === 'ended' ) {
const query = this.queries[ baseOffset ];
resolvePromises.push( this.resolveQuery( query ) );
}
}
if ( resolvePromises.length === 0 ) {
return this.lastValue;
}
const results = await Promise.all( resolvePromises );
// Sum the elapsed times (milliseconds) of all resolved queries.
const totalDuration = results.reduce( ( acc, val ) => acc + val, 0 );
// Store the last valid result
this.lastValue = totalDuration;
// Reset states
this.currentQueryIndex = 0;
this.queryOffsets.clear();
this.queryStates.clear();
this.activeQuery = null;
return totalDuration;
} catch ( error ) {
console.error( 'Error resolving queries:', error );
return this.lastValue;
} finally {
// Always release the re-entrancy guard, even on failure.
this.pendingResolve = false;
}
}
/**
* Resolves a single query, checking for completion and disjoint operation.
* @private
* @param {WebGLQuery} query - The query object to resolve.
* @returns {Promise<number>} The elapsed time in milliseconds.
*/
async resolveQuery( query ) {
return new Promise( ( resolve ) => {
if ( this.isDisposed ) {
resolve( this.lastValue );
return;
}
let timeoutId;
let isResolved = false;
const cleanup = () => {
if ( timeoutId ) {
clearTimeout( timeoutId );
timeoutId = null;
}
};
const finalizeResolution = ( value ) => {
if ( ! isResolved ) {
isResolved = true;
cleanup();
resolve( value );
}
};
const checkQuery = () => {
if ( this.isDisposed ) {
finalizeResolution( this.lastValue );
return;
}
try {
// Check if the GPU timer was disjoint (i.e., timing was unreliable)
const disjoint = this.gl.getParameter( this.ext.GPU_DISJOINT_EXT );
if ( disjoint ) {
finalizeResolution( this.lastValue );
return;
}
const available = this.gl.getQueryParameter( query, this.gl.QUERY_RESULT_AVAILABLE );
if ( ! available ) {
timeoutId = setTimeout( checkQuery, 1 );
return;
}
const elapsed = this.gl.getQueryParameter( query, this.gl.QUERY_RESULT );
resolve( Number( elapsed ) / 1e6 ); // Convert nanoseconds to milliseconds
} catch ( error ) {
console.error( 'Error checking query:', error );
resolve( this.lastValue );
}
};
checkQuery();
} );
}
/**
* Releases all resources held by this query pool.
* This includes deleting all query objects and clearing internal state.
*/
dispose() {
if ( this.isDisposed ) {
return;
}
this.isDisposed = true;
if ( ! this.trackTimestamp ) return;
for ( const query of this.queries ) {
this.gl.deleteQuery( query );
}
this.queries = [];
this.queryStates.clear();
this.queryOffsets.clear();
this.lastValue = 0;
this.activeQuery = null;
}
}
/**
* A backend implementation targeting WebGL 2.
*
* @private
* @augments Backend
*/
class WebGLBackend extends Backend {
/**
 * Constructs a new WebGL 2 backend.
 *
 * @param {Object} parameters - The configuration parameter.
 * @param {Boolean} [parameters.logarithmicDepthBuffer=false] - Whether logarithmic depth buffer is enabled or not.
 * @param {Boolean} [parameters.alpha=true] - Whether the default framebuffer (which represents the final contents of the canvas) should be transparent or opaque.
 * @param {Boolean} [parameters.depth=true] - Whether the default framebuffer should have a depth buffer or not.
 * @param {Boolean} [parameters.stencil=false] - Whether the default framebuffer should have a stencil buffer or not.
 * @param {Boolean} [parameters.antialias=false] - Whether MSAA as the default anti-aliasing should be enabled or not.
 * @param {Number} [parameters.samples=0] - When `antialias` is `true`, `4` samples are used by default. Set this parameter to any other integer value than 0 to overwrite the default.
 * @param {Boolean} [parameters.forceWebGL=false] - If set to `true`, the renderer uses a WebGL 2 backend no matter if WebGPU is supported or not.
 * @param {Boolean} [parameters.trackTimestamp=false] - Whether to track timestamps with a Timestamp Query API or not.
 * @param {WebGL2RenderingContext} [parameters.context=undefined] - A WebGL 2 rendering context.
 */
constructor( parameters = {} ) {
super( parameters );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isWebGLBackend = true;
/**
* A reference to a backend module holding shader attribute-related
* utility functions.
*
* @type {WebGLAttributeUtils?}
* @default null
*/
this.attributeUtils = null;
/**
* A reference to a backend module holding extension-related
* utility functions.
*
* @type {WebGLExtensions?}
* @default null
*/
this.extensions = null;
/**
* A reference to a backend module holding capability-related
* utility functions.
*
* @type {WebGLCapabilities?}
* @default null
*/
this.capabilities = null;
/**
* A reference to a backend module holding texture-related
* utility functions.
*
* @type {WebGLTextureUtils?}
* @default null
*/
this.textureUtils = null;
/**
* A reference to a backend module holding renderer-related
* utility functions.
*
* @type {WebGLBufferRenderer?}
* @default null
*/
this.bufferRenderer = null;
/**
* A reference to the rendering context.
*
* @type {WebGL2RenderingContext?}
* @default null
*/
this.gl = null;
/**
* A reference to a backend module holding state-related
* utility functions.
*
* @type {WebGLState?}
* @default null
*/
this.state = null;
/**
* A reference to a backend module holding common
* utility functions.
*
* @type {WebGLUtils?}
* @default null
*/
this.utils = null;
/**
* Dictionary for caching VAOs.
*
* @type {Object<String,WebGLVertexArrayObject>}
*/
this.vaoCache = {};
/**
* Dictionary for caching transform feedback objects.
*
* @type {Object<String,WebGLTransformFeedback>}
*/
this.transformFeedbackCache = {};
/**
* Controls if `gl.RASTERIZER_DISCARD` should be enabled or not.
* Only relevant when using compute shaders.
*
* @type {Boolean}
* @default false
*/
this.discard = false;
/**
* A reference to the `EXT_disjoint_timer_query_webgl2` extension. `null` if the
* device does not support the extension.
*
* @type {EXTDisjointTimerQueryWebGL2?}
* @default null
*/
this.disjoint = null;
/**
* A reference to the `KHR_parallel_shader_compile` extension. `null` if the
* device does not support the extension.
*
* @type {KHRParallelShaderCompile?}
* @default null
*/
this.parallel = null;
/**
* Whether to track timestamps with a Timestamp Query API or not.
*
* @type {Boolean}
* @default false
*/
this.trackTimestamp = ( parameters.trackTimestamp === true );
/**
* A reference to the current render context.
*
* @private
* @type {RenderContext}
* @default null
*/
this._currentContext = null;
/**
* A unique collection of bindings.
*
* @private
* @type {WeakSet}
*/
this._knownBindings = new WeakSet();
/**
* The target framebuffer when rendering with
* the WebXR device API.
*
* NOTE: the property name carries a historical typo ('Famebuffer');
* it is kept as-is for compatibility with existing accessors.
*
* @private
* @type {WebGLFramebuffer}
* @default null
*/
this._xrFamebuffer = null;
}
/**
 * Initializes the backend so it is ready for usage.
 *
 * Acquires (or adopts) a WebGL 2 context, installs a context-loss handler,
 * creates all utility modules and warms up required extensions.
 *
 * @param {Renderer} renderer - The renderer.
 */
init( renderer ) {
super.init( renderer );
//
const parameters = this.parameters;
const contextAttributes = {
antialias: false, // MSAA is applied via a custom renderbuffer
alpha: true, // always true for performance reasons
depth: false, // depth and stencil are set to false since the engine always renders into a framebuffer target first
stencil: false
};
// Use a user-provided context when given, otherwise create a WebGL 2 context on the canvas.
const glContext = ( parameters.context !== undefined ) ? parameters.context : renderer.domElement.getContext( 'webgl2', contextAttributes );
// Forward context loss to the renderer so applications can react via onDeviceLost().
function onContextLost( event ) {
event.preventDefault();
const contextLossInfo = {
api: 'WebGL',
message: event.statusMessage || 'Unknown reason',
reason: null,
originalEvent: event
};
renderer.onDeviceLost( contextLossInfo );
}
// Stored so the listener can be removed later — removal site not visible in this chunk.
this._onContextLost = onContextLost;
renderer.domElement.addEventListener( 'webglcontextlost', onContextLost, false );
this.gl = glContext;
this.extensions = new WebGLExtensions( this );
this.capabilities = new WebGLCapabilities( this );
this.attributeUtils = new WebGLAttributeUtils( this );
this.textureUtils = new WebGLTextureUtils( this );
this.bufferRenderer = new WebGLBufferRenderer( this );
this.state = new WebGLState( this );
this.utils = new WebGLUtils( this );
// Eagerly request commonly used extensions so they are enabled before first use.
this.extensions.get( 'EXT_color_buffer_float' );
this.extensions.get( 'WEBGL_clip_cull_distance' );
this.extensions.get( 'OES_texture_float_linear' );
this.extensions.get( 'EXT_color_buffer_half_float' );
this.extensions.get( 'WEBGL_multisampled_render_to_texture' );
this.extensions.get( 'WEBGL_render_shared_exponent' );
this.extensions.get( 'WEBGL_multi_draw' );
// Optional extensions used for timestamp queries and async shader compilation.
this.disjoint = this.extensions.get( 'EXT_disjoint_timer_query_webgl2' );
this.parallel = this.extensions.get( 'KHR_parallel_shader_compile' );
}
/**
 * The coordinate system of the backend.
 *
 * @type {Number}
 * @readonly
 */
get coordinateSystem() {
return WebGLCoordinateSystem;
}
/**
 * This method performs a readback operation by moving buffer data from
 * a storage buffer attribute from the GPU to the CPU.
 *
 * @async
 * @param {StorageBufferAttribute} attribute - The storage buffer attribute.
 * @return {Promise<ArrayBuffer>} A promise that resolves with the buffer data when the data are ready.
 */
async getArrayBufferAsync( attribute ) {
// Delegates to the attribute utilities module.
return await this.attributeUtils.getArrayBufferAsync( attribute );
}
/**
 * Can be used to synchronize CPU operations with GPU tasks. So when this method is called,
 * the CPU waits for the GPU to complete its operation (e.g. a compute task).
 *
 * @async
 * @return {Promise} A Promise that resolves when synchronization has been finished.
 */
async waitForGPU() {
// Implementation lives in WebGLUtils (presumably a fence-sync based wait — confirm there).
await this.utils._clientWaitAsync();
}
/**
 * Ensures the backend is XR compatible.
 *
 * @async
 * @return {Promise} A Promise that resolve when the renderer is XR compatible.
 */
async makeXRCompatible() {
const attributes = this.gl.getContextAttributes();
// Only trigger the (potentially expensive) compatibility switch when needed.
if ( attributes.xrCompatible !== true ) {
await this.gl.makeXRCompatible();
}
}
/**
 * Sets the XR rendering destination.
 *
 * @param {WebGLFramebuffer} xrFamebuffer - The XR framebuffer.
 */
setXRTarget( xrFamebuffer ) {
// NOTE: property name carries a historical typo ('Famebuffer'); kept for compatibility.
this._xrFamebuffer = xrFamebuffer;
}
/**
 * Configures the given XR render target with external textures.
 *
 * This method is only relevant when using the WebXR Layers API.
 *
 * @param {XRRenderTarget} renderTarget - The XR render target.
 * @param {WebGLTexture} colorTexture - A native color texture.
 * @param {WebGLTexture?} [depthTexture=null] - A native depth texture.
 */
setXRRenderTargetTextures( renderTarget, colorTexture, depthTexture = null ) {
const gl = this.gl;
this.set( renderTarget.texture, { textureGPU: colorTexture, glInternalFormat: gl.RGBA8 } ); // see #24698 why RGBA8 and not SRGB8_ALPHA8 is used
if ( depthTexture !== null ) {
// Pick a packed depth/stencil format when the target needs a stencil buffer.
const glInternalFormat = renderTarget.stencilBuffer ? gl.DEPTH24_STENCIL8 : gl.DEPTH_COMPONENT24;
this.set( renderTarget.depthTexture, { textureGPU: depthTexture, glInternalFormat: glInternalFormat } );
// An external depth texture was provided, so the renderer must not allocate its own.
renderTarget.autoAllocateDepthBuffer = false;
// The multisample_render_to_texture extension doesn't work properly if there
// are midframe flushes and an external depth texture.
if ( this.extensions.has( 'WEBGL_multisampled_render_to_texture' ) === true ) {
console.warn( 'THREE.WebGLBackend: Render-to-texture extension was disabled because an external texture was provided' );
}
}
}
/**
* Inits a time stamp query for the given render context.
*
* @param {RenderContext} renderContext - The render context.
*/
initTimestampQuery( renderContext ) {
if ( ! this.disjoint || ! this.trackTimestamp ) return;
const type = renderContext.isComputeNode ? 'compute' : 'render';
if ( ! this.timestampQueryPool[ type ] ) {
// TODO: Variable maxQueries?
this.timestampQueryPool[ type ] = new WebGLTimestampQueryPool( this.gl, type, 2048 );
}
const timestampQueryPool = this.timestampQueryPool[ type ];
const baseOffset = timestampQueryPool.allocateQueriesForContext( renderContext );
if ( baseOffset !== null ) {
timestampQueryPool.beginQuery( renderContext );
}
}
// timestamp utils
/**
* Prepares the timestamp buffer.
*
* @param {RenderContext} renderContext - The render context.
*/
prepareTimestampBuffer( renderContext ) {
if ( ! this.disjoint || ! this.trackTimestamp ) return;
const type = renderContext.isComputeNode ? 'compute' : 'render';
const timestampQueryPool = this.timestampQueryPool[ type ];
timestampQueryPool.endQuery( renderContext );
}
/**
 * Returns the backend's rendering context.
 *
 * @return {WebGL2RenderingContext} The rendering context.
 */
getContext() {
// Direct accessor for the context created/adopted in init().
return this.gl;
}
/**
 * This method is executed at the beginning of a render call and prepares
 * the WebGL state for upcoming render calls
 *
 * @param {RenderContext} renderContext - The render context.
 */
beginRender( renderContext ) {
const { state, gl } = this;
const renderContextData = this.get( renderContext );
// Start GPU timing for this pass (no-op unless timestamps are enabled).
this.initTimestampQuery( renderContext );
// Remember the enclosing pass so finishRender() can restore it (nested passes).
renderContextData.previousContext = this._currentContext;
this._currentContext = renderContext;
this._setFramebuffer( renderContext );
this.clear( renderContext.clearColor, renderContext.clearDepth, renderContext.clearStencil, renderContext, false );
//
if ( renderContext.viewport ) {
this.updateViewport( renderContext );
} else {
state.viewport( 0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight );
}
if ( renderContext.scissor ) {
const { x, y, width, height } = renderContext.scissorValue;
// Flip Y to match WebGL's bottom-left window origin.
state.scissor( x, renderContext.height - height - y, width, height );
}
const occlusionQueryCount = renderContext.occlusionQueryCount;
if ( occlusionQueryCount > 0 ) {
// Get a reference to the array of objects with queries. The renderContextData property
// can be changed by another render pass before the async reading of all previous queries complete
renderContextData.currentOcclusionQueries = renderContextData.occlusionQueries;
renderContextData.currentOcclusionQueryObjects = renderContextData.occlusionQueryObjects;
renderContextData.lastOcclusionObject = null;
renderContextData.occlusionQueries = new Array( occlusionQueryCount );
renderContextData.occlusionQueryObjects = new Array( occlusionQueryCount );
renderContextData.occlusionQueryIndex = 0;
}
}
/**
 * This method is executed at the end of a render call and finalizes work
 * after draw calls.
 *
 * @param {RenderContext} renderContext - The render context.
 */
finishRender( renderContext ) {
const { gl, state } = this;
const renderContextData = this.get( renderContext );
const previousContext = renderContextData.previousContext;
const occlusionQueryCount = renderContext.occlusionQueryCount;
if ( occlusionQueryCount > 0 ) {
// Close a query that is still open from the last occlusion-tested object.
if ( occlusionQueryCount > renderContextData.occlusionQueryIndex ) {
gl.endQuery( gl.ANY_SAMPLES_PASSED );
}
// Start async polling of the query results (non-blocking).
this.resolveOccludedAsync( renderContext );
}
const textures = renderContext.textures;
if ( textures !== null ) {
for ( let i = 0; i < textures.length; i ++ ) {
const texture = textures[ i ];
if ( texture.generateMipmaps ) {
this.generateMipmaps( texture );
}
}
}
this._currentContext = previousContext;
if ( renderContext.textures !== null && renderContext.renderTarget ) {
const renderTargetContextData = this.get( renderContext.renderTarget );
const { samples } = renderContext.renderTarget;
// Resolve MSAA by blitting the multisampled framebuffer into the target
// framebuffer (skipped when the multisampled-render-to-texture path is active).
if ( samples > 0 && this._useMultisampledRTT( renderContext.renderTarget ) === false ) {
const fb = renderTargetContextData.framebuffers[ renderContext.getCacheKey() ];
const mask = gl.COLOR_BUFFER_BIT;
const msaaFrameBuffer = renderTargetContextData.msaaFrameBuffer;
const textures = renderContext.textures;
state.bindFramebuffer( gl.READ_FRAMEBUFFER, msaaFrameBuffer );
state.bindFramebuffer( gl.DRAW_FRAMEBUFFER, fb );
for ( let i = 0; i < textures.length; i ++ ) {
// TODO Add support for MRT
if ( renderContext.scissor ) {
const { x, y, width, height } = renderContext.scissorValue;
// Flip Y to WebGL's bottom-left origin.
const viewY = renderContext.height - height - y;
gl.blitFramebuffer( x, viewY, x + width, viewY + height, x, viewY, x + width, viewY + height, mask, gl.NEAREST );
// Invalidation hints the driver that the MSAA contents can be discarded.
gl.invalidateSubFramebuffer( gl.READ_FRAMEBUFFER, renderTargetContextData.invalidationArray, x, viewY, width, height );
} else {
gl.blitFramebuffer( 0, 0, renderContext.width, renderContext.height, 0, 0, renderContext.width, renderContext.height, mask, gl.NEAREST );
gl.invalidateFramebuffer( gl.READ_FRAMEBUFFER, renderTargetContextData.invalidationArray );
}
}
}
}
// Restore the framebuffer/viewport of the enclosing render pass, if any.
if ( previousContext !== null ) {
this._setFramebuffer( previousContext );
if ( previousContext.viewport ) {
this.updateViewport( previousContext );
} else {
state.viewport( 0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight );
}
}
this.prepareTimestampBuffer( renderContext );
}
/**
* This method processes the result of occlusion queries and writes it
* into render context data.
*
* @async
* @param {RenderContext} renderContext - The render context.
*/
resolveOccludedAsync( renderContext ) {
const renderContextData = this.get( renderContext );
// handle occlusion query results
const { currentOcclusionQueries, currentOcclusionQueryObjects } = renderContextData;
if ( currentOcclusionQueries && currentOcclusionQueryObjects ) {
const occluded = new WeakSet();
const { gl } = this;
renderContextData.currentOcclusionQueryObjects = null;
renderContextData.currentOcclusionQueries = null;
const check = () => {
let completed = 0;
// check all queries and requeue as appropriate
for ( let i = 0; i < currentOcclusionQueries.length; i ++ ) {
const query = currentOcclusionQueries[ i ];
if ( query === null ) continue;
if ( gl.getQueryParameter( query, gl.QUERY_RESULT_AVAILABLE ) ) {
if ( gl.getQueryParameter( query, gl.QUERY_RESULT ) === 0 ) occluded.add( currentOcclusionQueryObjects[ i ] );
currentOcclusionQueries[ i ] = null;
gl.deleteQuery( query );
completed ++;
}
}
if ( completed < currentOcclusionQueries.length ) {
requestAnimationFrame( check );
} else {
renderContextData.occluded = occluded;
}
};
check();
}
}
/**
* Returns `true` if the given 3D object is fully occluded by other
* 3D objects in the scene.
*
* @param {RenderContext} renderContext - The render context.
* @param {Object3D} object - The 3D object to test.
* @return {Boolean} Whether the 3D object is fully occluded or not.
*/
isOccluded( renderContext, object ) {
const renderContextData = this.get( renderContext );
return renderContextData.occluded && renderContextData.occluded.has( object );
}
/**
* Updates the viewport with the values from the given render context.
*
* @param {RenderContext} renderContext - The render context.
*/
updateViewport( renderContext ) {
const { state } = this;
const { x, y, width, height } = renderContext.viewportValue;
state.viewport( x, renderContext.height - height - y, width, height );
}
/**
* Defines the scissor test.
*
* @param {Boolean} boolean - Whether the scissor test should be enabled or not.
*/
setScissorTest( boolean ) {
const state = this.state;
state.setScissorTest( boolean );
}
/**
* Performs a clear operation.
*
* @param {Boolean} color - Whether the color buffer should be cleared or not.
* @param {Boolean} depth - Whether the depth buffer should be cleared or not.
* @param {Boolean} stencil - Whether the stencil buffer should be cleared or not.
* @param {Object?} [descriptor=null] - The render context of the current set render target.
* @param {Boolean} [setFrameBuffer=true] - TODO.
*/
clear( color, depth, stencil, descriptor = null, setFrameBuffer = true ) {
const { gl } = this;
if ( descriptor === null ) {
const clearColor = this.getClearColor();
// premultiply alpha
clearColor.r *= clearColor.a;
clearColor.g *= clearColor.a;
clearColor.b *= clearColor.a;
descriptor = {
textures: null,
clearColorValue: clearColor
};
}
//
let clear = 0;
if ( color ) clear |= gl.COLOR_BUFFER_BIT;
if ( depth ) clear |= gl.DEPTH_BUFFER_BIT;
if ( stencil ) clear |= gl.STENCIL_BUFFER_BIT;
if ( clear !== 0 ) {
let clearColor;
if ( descriptor.clearColorValue ) {
clearColor = descriptor.clearColorValue;
} else {
clearColor = this.getClearColor();
// premultiply alpha
clearColor.r *= clearColor.a;
clearColor.g *= clearColor.a;
clearColor.b *= clearColor.a;
}
if ( depth ) this.state.setDepthMask( true );
if ( descriptor.textures === null ) {
gl.clearColor( clearColor.r, clearColor.g, clearColor.b, clearColor.a );
gl.clear( clear );
} else {
if ( setFrameBuffer ) this._setFramebuffer( descriptor );
if ( color ) {
for ( let i = 0; i < descriptor.textures.length; i ++ ) {
gl.clearBufferfv( gl.COLOR, i, [ clearColor.r, clearColor.g, clearColor.b, clearColor.a ] );
}
}
if ( depth && stencil ) {
gl.clearBufferfi( gl.DEPTH_STENCIL, 0, 1, 0 );
} else if ( depth ) {
gl.clearBufferfv( gl.DEPTH, 0, [ 1.0 ] );
} else if ( stencil ) {
gl.clearBufferiv( gl.STENCIL, 0, [ 0 ] );
}
}
}
}
/**
* This method is executed at the beginning of a compute call and
* prepares the state for upcoming compute tasks.
*
* @param {Node|Array<Node>} computeGroup - The compute node(s).
*/
beginCompute( computeGroup ) {
const { state, gl } = this;
state.bindFramebuffer( gl.FRAMEBUFFER, null );
this.initTimestampQuery( computeGroup );
}
/**
 * Executes a compute command for the given compute node.
 *
 * Compute is emulated via transform feedback: the "compute" program runs as
 * a vertex shader over gl.POINTS with rasterization discarded, writing its
 * results into transform feedback buffers.
 *
 * @param {Node|Array<Node>} computeGroup - The group of compute nodes of a compute call. Can be a single compute node.
 * @param {Node} computeNode - The compute node.
 * @param {Array<BindGroup>} bindings - The bindings.
 * @param {ComputePipeline} pipeline - The compute pipeline.
 */
compute( computeGroup, computeNode, bindings, pipeline ) {
const { state, gl } = this;
if ( this.discard === false ) {
// required here to handle async behaviour of render.compute()
gl.enable( gl.RASTERIZER_DISCARD );
this.discard = true;
}
const { programGPU, transformBuffers, attributes } = this.get( pipeline );
const vaoKey = this._getVaoKey( null, attributes );
const vaoGPU = this.vaoCache[ vaoKey ];
if ( vaoGPU === undefined ) {
// _createVao() binds the newly created VAO itself — presumably; confirm in its implementation.
this._createVao( null, attributes );
} else {
gl.bindVertexArray( vaoGPU );
}
state.useProgram( programGPU );
this._bindUniforms( bindings );
const transformFeedbackGPU = this._getTransformFeedback( transformBuffers );
gl.bindTransformFeedback( gl.TRANSFORM_FEEDBACK, transformFeedbackGPU );
gl.beginTransformFeedback( gl.POINTS );
// One invocation per element; instanced path for storage-instanced attributes.
if ( attributes[ 0 ].isStorageInstancedBufferAttribute ) {
gl.drawArraysInstanced( gl.POINTS, 0, 1, computeNode.count );
} else {
gl.drawArrays( gl.POINTS, 0, computeNode.count );
}
gl.endTransformFeedback();
gl.bindTransformFeedback( gl.TRANSFORM_FEEDBACK, null );
// switch active buffers
for ( let i = 0; i < transformBuffers.length; i ++ ) {
const dualAttributeData = transformBuffers[ i ];
if ( dualAttributeData.pbo ) {
this.textureUtils.copyBufferToTexture( dualAttributeData.transformBuffer, dualAttributeData.pbo );
}
dualAttributeData.switchBuffers();
}
}
/**
* This method is executed at the end of a compute call and
* finalizes work after compute tasks.
*
* @param {Node|Array<Node>} computeGroup - The compute node(s).
*/
finishCompute( computeGroup ) {
const gl = this.gl;
this.discard = false;
gl.disable( gl.RASTERIZER_DISCARD );
this.prepareTimestampBuffer( computeGroup );
if ( this._currentContext ) {
this._setFramebuffer( this._currentContext );
}
}
/**
 * Executes a draw command for the given render object.
 *
 * Binds uniforms, material state, program and VAO, manages occlusion
 * queries, selects the primitive mode and finally issues the draw call
 * (once per sub-camera for array cameras).
 *
 * @param {RenderObject} renderObject - The render object to draw.
 * @param {Info} info - Holds a series of statistical information about the GPU memory and the rendering process.
 */
draw( renderObject/*, info*/ ) {
const { object, pipeline, material, context, hardwareClippingPlanes } = renderObject;
const { programGPU } = this.get( pipeline );
const { gl, state } = this;
const contextData = this.get( context );
const drawParams = renderObject.getDrawParameters();
if ( drawParams === null ) return;
//
this._bindUniforms( renderObject.getBindings() );
// A negative determinant means the world matrix mirrors the object, flipping its winding order.
const frontFaceCW = ( object.isMesh && object.matrixWorld.determinant() < 0 );
state.setMaterial( material, frontFaceCW, hardwareClippingPlanes );
state.useProgram( programGPU );
// Resolve the VAO: reuse the cached static VAO unless the geometry changed.
const renderObjectData = this.get( renderObject );
let vaoGPU = renderObjectData.staticVao;
if ( vaoGPU === undefined || renderObjectData.geometryId !== renderObject.geometry.id ) {
const vaoKey = this._getVaoKey( renderObject.getIndex(), renderObject.getAttributes() );
vaoGPU = this.vaoCache[ vaoKey ];
if ( vaoGPU === undefined ) {
let staticVao;
( { vaoGPU, staticVao } = this._createVao( renderObject.getIndex(), renderObject.getAttributes() ) );
if ( staticVao ) {
renderObjectData.staticVao = vaoGPU;
renderObjectData.geometryId = renderObject.geometry.id;
}
}
}
gl.bindVertexArray( vaoGPU );
//
const index = renderObject.getIndex();
// Occlusion query bookkeeping: close the previous object's query and
// open a new one when this object requests occlusion testing.
const lastObject = contextData.lastOcclusionObject;
if ( lastObject !== object && lastObject !== undefined ) {
if ( lastObject !== null && lastObject.occlusionTest === true ) {
gl.endQuery( gl.ANY_SAMPLES_PASSED );
contextData.occlusionQueryIndex ++;
}
if ( object.occlusionTest === true ) {
const query = gl.createQuery();
gl.beginQuery( gl.ANY_SAMPLES_PASSED, query );
contextData.occlusionQueries[ contextData.occlusionQueryIndex ] = query;
contextData.occlusionQueryObjects[ contextData.occlusionQueryIndex ] = object;
}
contextData.lastOcclusionObject = object;
}
// Select the GL primitive mode from the object type / wireframe flag.
const renderer = this.bufferRenderer;
if ( object.isPoints ) renderer.mode = gl.POINTS;
else if ( object.isLineSegments ) renderer.mode = gl.LINES;
else if ( object.isLine ) renderer.mode = gl.LINE_STRIP;
else if ( object.isLineLoop ) renderer.mode = gl.LINE_LOOP;
else {
if ( material.wireframe === true ) {
state.setLineWidth( material.wireframeLinewidth * this.renderer.getPixelRatio() );
renderer.mode = gl.LINES;
} else {
renderer.mode = gl.TRIANGLES;
}
}
//
const { vertexCount, instanceCount } = drawParams;
let { firstVertex } = drawParams;
renderer.object = object;
if ( index !== null ) {
// Indexed draw: convert the element offset to a byte offset.
firstVertex *= index.array.BYTES_PER_ELEMENT;
const indexData = this.get( index );
renderer.index = index.count;
renderer.type = indexData.type;
} else {
renderer.index = 0;
}
const draw = () => {
if ( object.isBatchedMesh ) {
if ( object._multiDrawInstances !== null ) {
renderer.renderMultiDrawInstances( object._multiDrawStarts, object._multiDrawCounts, object._multiDrawCount, object._multiDrawInstances );
} else if ( ! this.hasFeature( 'WEBGL_multi_draw' ) ) {
warnOnce( 'THREE.WebGLRenderer: WEBGL_multi_draw not supported.' );
} else {
renderer.renderMultiDraw( object._multiDrawStarts, object._multiDrawCounts, object._multiDrawCount );
}
} else if ( instanceCount > 1 ) {
renderer.renderInstances( firstVertex, vertexCount, instanceCount );
} else {
renderer.render( firstVertex, vertexCount );
}
};
if ( renderObject.camera.isArrayCamera && renderObject.camera.cameras.length > 0 ) {
const cameraData = this.get( renderObject.camera );
const cameras = renderObject.camera.cameras;
const cameraIndex = renderObject.getBindingGroup( 'cameraIndex' ).bindings[ 0 ];
// Lazily build one small UBO per sub-camera holding its index.
if ( cameraData.indexesGPU === undefined || cameraData.indexesGPU.length !== cameras.length ) {
const data = new Uint32Array( [ 0, 0, 0, 0 ] );
const indexesGPU = [];
for ( let i = 0, len = cameras.length; i < len; i ++ ) {
const bufferGPU = gl.createBuffer();
data[ 0 ] = i;
gl.bindBuffer( gl.UNIFORM_BUFFER, bufferGPU );
gl.bufferData( gl.UNIFORM_BUFFER, data, gl.STATIC_DRAW );
indexesGPU.push( bufferGPU );
}
cameraData.indexesGPU = indexesGPU; // TODO: Create a global library for this
}
const cameraIndexData = this.get( cameraIndex );
const pixelRatio = this.renderer.getPixelRatio();
// Draw once per sub-camera, each with its own viewport and camera-index UBO.
for ( let i = 0, len = cameras.length; i < len; i ++ ) {
const subCamera = cameras[ i ];
if ( object.layers.test( subCamera.layers ) ) {
const vp = subCamera.viewport;
const x = vp.x * pixelRatio;
const y = vp.y * pixelRatio;
const width = vp.width * pixelRatio;
const height = vp.height * pixelRatio;
state.viewport(
Math.floor( x ),
Math.floor( renderObject.context.height - height - y ),
Math.floor( width ),
Math.floor( height )
);
state.bindBufferBase( gl.UNIFORM_BUFFER, cameraIndexData.index, cameraData.indexesGPU[ i ] );
draw();
}
}
} else {
draw();
}
//
gl.bindVertexArray( null );
}
/**
 * Always returns `false`: this backend applies the full material/program
 * state per draw call (see `draw()` using WebGLState), so a render
 * pipeline never requires an explicit update.
 *
 * @param {RenderObject} renderObject - The render object.
 * @return {Boolean} Whether the render pipeline requires an update or not.
 */
needsRenderUpdate( /*renderObject*/ ) {
return false;
}
/**
 * Always returns an empty string: no pipeline cache key is needed since
 * programs are resolved directly per render object in `draw()` rather
 * than looked up through a pipeline cache.
 *
 * @param {RenderObject} renderObject - The render object.
 * @return {String} The cache key.
 */
getRenderCacheKey( /*renderObject*/ ) {
return '';
}
// textures
/**
 * Creates a default texture for the given texture that can be used
 * as a placeholder until the actual texture is ready for usage.
 *
 * @param {Texture} texture - The texture to create a default texture for.
 */
createDefaultTexture( texture ) {
// All texture operations are delegated to the texture utilities module.
this.textureUtils.createDefaultTexture( texture );
}
/**
 * Defines a texture on the GPU for the given texture object.
 *
 * @param {Texture} texture - The texture.
 * @param {Object} [options={}] - Optional configuration parameter.
 */
createTexture( texture, options ) {
this.textureUtils.createTexture( texture, options );
}
/**
 * Uploads the updated texture data to the GPU.
 *
 * @param {Texture} texture - The texture.
 * @param {Object} [options={}] - Optional configuration parameter.
 */
updateTexture( texture, options ) {
this.textureUtils.updateTexture( texture, options );
}
/**
 * Generates mipmaps for the given texture.
 *
 * @param {Texture} texture - The texture.
 */
generateMipmaps( texture ) {
this.textureUtils.generateMipmaps( texture );
}
/**
 * Destroys the GPU data for the given texture object.
 *
 * @param {Texture} texture - The texture.
 */
destroyTexture( texture ) {
this.textureUtils.destroyTexture( texture );
}
/**
 * Returns texture data as a typed array.
 *
 * @async
 * @param {Texture} texture - The texture to copy.
 * @param {Number} x - The x coordinate of the copy origin.
 * @param {Number} y - The y coordinate of the copy origin.
 * @param {Number} width - The width of the copy.
 * @param {Number} height - The height of the copy.
 * @param {Number} faceIndex - The face index.
 * @return {Promise<TypedArray>} A Promise that resolves with a typed array when the copy operation has finished.
 */
async copyTextureToBuffer( texture, x, y, width, height, faceIndex ) {
return this.textureUtils.copyTextureToBuffer( texture, x, y, width, height, faceIndex );
}
/**
 * This method does nothing since WebGL 2 has no concept of samplers.
 *
 * @param {Texture} texture - The texture to create the sampler for.
 */
createSampler( /*texture*/ ) {
// Intentionally empty: sampler state is part of the texture object in WebGL 2.
}
/**
 * This method does nothing since WebGL 2 has no concept of samplers.
 *
 * @param {Texture} texture - The texture to destroy the sampler for.
 */
destroySampler( /*texture*/ ) {}
// node builder
/**
 * Returns a node builder for the given render object.
 *
 * @param {RenderObject} object - The render object.
 * @param {Renderer} renderer - The renderer.
 * @return {GLSLNodeBuilder} The node builder.
 */
createNodeBuilder( object, renderer ) {
// This backend generates GLSL, hence the GLSL node builder.
return new GLSLNodeBuilder( object, renderer );
}
// program
/**
* Creates a shader program from the given programmable stage.
*
* @param {ProgrammableStage} program - The programmable stage.
*/
createProgram( program ) {
const gl = this.gl;
const { stage, code } = program;
const shader = stage === 'fragment' ? gl.createShader( gl.FRAGMENT_SHADER ) : gl.createShader( gl.VERTEX_SHADER );
gl.shaderSource( shader, code );
gl.compileShader( shader );
this.set( program, {
shaderGPU: shader
} );
}
/**
 * Destroys the shader program of the given programmable stage.
 *
 * Removes the backend's cached data (incl. the `shaderGPU` reference) for
 * the stage; note that `gl.deleteShader()` is not called here.
 *
 * @param {ProgrammableStage} program - The programmable stage.
 */
destroyProgram( program ) {
this.delete( program );
}
/**
 * Creates a render pipeline for the given render object.
 *
 * Links the vertex and fragment shaders into a WebGL program. If the
 * KHR_parallel_shader_compile extension is available and a promise list is
 * given, link completion is polled asynchronously so compilation does not
 * block the main thread.
 *
 * @param {RenderObject} renderObject - The render object.
 * @param {Array<Promise>} promises - An array of compilation promises which are used in `compileAsync()`.
 */
createRenderPipeline( renderObject, promises ) {
const gl = this.gl;
const pipeline = renderObject.pipeline;
// Program
const { fragmentProgram, vertexProgram } = pipeline;
const programGPU = gl.createProgram();
const fragmentShader = this.get( fragmentProgram ).shaderGPU;
const vertexShader = this.get( vertexProgram ).shaderGPU;
gl.attachShader( programGPU, fragmentShader );
gl.attachShader( programGPU, vertexShader );
gl.linkProgram( programGPU );
this.set( pipeline, {
programGPU,
fragmentShader,
vertexShader
} );
// Async path: poll COMPLETION_STATUS_KHR once per frame until linking finished.
if ( promises !== null && this.parallel ) {
const p = new Promise( ( resolve /*, reject*/ ) => {
const parallel = this.parallel;
const checkStatus = () => {
if ( gl.getProgramParameter( programGPU, parallel.COMPLETION_STATUS_KHR ) ) {
this._completeCompile( renderObject, pipeline );
resolve();
} else {
requestAnimationFrame( checkStatus );
}
};
checkStatus();
} );
promises.push( p );
return;
}
// Sync path: finalize immediately (may block until linking is done).
this._completeCompile( renderObject, pipeline );
}
/**
* Formats the source code of error messages.
*
* @private
* @param {String} string - The code.
* @param {Number} errorLine - The error line.
* @return {String} The formatted code.
*/
_handleSource( string, errorLine ) {
const lines = string.split( '\n' );
const lines2 = [];
const from = Math.max( errorLine - 6, 0 );
const to = Math.min( errorLine + 6, lines.length );
for ( let i = from; i < to; i ++ ) {
const line = i + 1;
lines2.push( `${line === errorLine ? '>' : ' '} ${line}: ${lines[ i ]}` );
}
return lines2.join( '\n' );
}
/**
* Gets the shader compilation errors from the info log.
*
* @private
* @param {WebGL2RenderingContext} gl - The rendering context.
* @param {WebGLShader} shader - The WebGL shader object.
* @param {String} type - The shader type.
* @return {String} The shader errors.
*/
_getShaderErrors( gl, shader, type ) {
const status = gl.getShaderParameter( shader, gl.COMPILE_STATUS );
const errors = gl.getShaderInfoLog( shader ).trim();
if ( status && errors === '' ) return '';
const errorMatches = /ERROR: 0:(\d+)/.exec( errors );
if ( errorMatches ) {
const errorLine = parseInt( errorMatches[ 1 ] );
return type.toUpperCase() + '\n\n' + errors + '\n\n' + this._handleSource( gl.getShaderSource( shader ), errorLine );
} else {
return errors;
}
}
/**
* Logs shader compilation errors.
*
* @private
* @param {WebGLProgram} programGPU - The WebGL program.
* @param {WebGLShader} glFragmentShader - The fragment shader as a native WebGL shader object.
* @param {WebGLShader} glVertexShader - The vertex shader as a native WebGL shader object.
*/
_logProgramError( programGPU, glFragmentShader, glVertexShader ) {
if ( this.renderer.debug.checkShaderErrors ) {
const gl = this.gl;
const programLog = gl.getProgramInfoLog( programGPU ).trim();
if ( gl.getProgramParameter( programGPU, gl.LINK_STATUS ) === false ) {
if ( typeof this.renderer.debug.onShaderError === 'function' ) {
this.renderer.debug.onShaderError( gl, programGPU, glVertexShader, glFragmentShader );
} else {
// default error reporting
const vertexErrors = this._getShaderErrors( gl, glVertexShader, 'vertex' );
const fragmentErrors = this._getShaderErrors( gl, glFragmentShader, 'fragment' );
console.error(
'THREE.WebGLProgram: Shader Error ' + gl.getError() + ' - ' +
'VALIDATE_STATUS ' + gl.getProgramParameter( programGPU, gl.VALIDATE_STATUS ) + '\n\n' +
'Program Info Log: ' + programLog + '\n' +
vertexErrors + '\n' +
fragmentErrors
);
}
} else if ( programLog !== '' ) {
console.warn( 'THREE.WebGLProgram: Program Info Log:', programLog );
}
}
}
/**
* Completes the shader program setup for the given render object.
*
* @private
* @param {RenderObject} renderObject - The render object.
* @param {RenderPipeline} pipeline - The render pipeline.
*/
_completeCompile( renderObject, pipeline ) {
const { state, gl } = this;
const pipelineData = this.get( pipeline );
const { programGPU, fragmentShader, vertexShader } = pipelineData;
if ( gl.getProgramParameter( programGPU, gl.LINK_STATUS ) === false ) {
this._logProgramError( programGPU, fragmentShader, vertexShader );
}
state.useProgram( programGPU );
// Bindings
const bindings = renderObject.getBindings();
this._setupBindings( bindings, programGPU );
//
this.set( pipeline, {
programGPU
} );
}
/**
 * Creates a compute pipeline for the given compute pipeline.
 *
 * The WebGL backend has no native compute; compute is emulated via transform
 * feedback: the compute program runs as a vertex shader whose varyings are
 * captured into buffers, paired with a no-op fragment shader.
 *
 * @param {ComputePipeline} computePipeline - The compute pipeline.
 * @param {Array<BindGroup>} bindings - The bindings.
 */
createComputePipeline( computePipeline, bindings ) {
const { state, gl } = this;
// Program
// Minimal fragment stage: required to link, but produces no output.
const fragmentProgram = {
stage: 'fragment',
code: '#version 300 es\nprecision highp float;\nvoid main() {}'
};
this.createProgram( fragmentProgram );
const { computeProgram } = computePipeline;
const programGPU = gl.createProgram();
const fragmentShader = this.get( fragmentProgram ).shaderGPU;
// The compute program's shader acts as the vertex stage.
const vertexShader = this.get( computeProgram ).shaderGPU;
// Collect the varying names to capture and the attribute nodes that will
// receive the captured data.
const transforms = computeProgram.transforms;
const transformVaryingNames = [];
const transformAttributeNodes = [];
for ( let i = 0; i < transforms.length; i ++ ) {
const transform = transforms[ i ];
transformVaryingNames.push( transform.varyingName );
transformAttributeNodes.push( transform.attributeNode );
}
gl.attachShader( programGPU, fragmentShader );
gl.attachShader( programGPU, vertexShader );
// Must be configured before linking; each varying goes to its own buffer.
gl.transformFeedbackVaryings(
programGPU,
transformVaryingNames,
gl.SEPARATE_ATTRIBS
);
gl.linkProgram( programGPU );
if ( gl.getProgramParameter( programGPU, gl.LINK_STATUS ) === false ) {
this._logProgramError( programGPU, fragmentShader, vertexShader );
}
state.useProgram( programGPU );
// Bindings
this._setupBindings( bindings, programGPU );
// Ensure GPU buffers exist for all input attributes.
const attributeNodes = computeProgram.attributes;
const attributes = [];
const transformBuffers = [];
for ( let i = 0; i < attributeNodes.length; i ++ ) {
const attribute = attributeNodes[ i ].node.attribute;
attributes.push( attribute );
if ( ! this.has( attribute ) ) this.attributeUtils.createAttribute( attribute, gl.ARRAY_BUFFER );
}
// Ensure GPU buffers exist for all transform-feedback output attributes.
for ( let i = 0; i < transformAttributeNodes.length; i ++ ) {
const attribute = transformAttributeNodes[ i ].attribute;
if ( ! this.has( attribute ) ) this.attributeUtils.createAttribute( attribute, gl.ARRAY_BUFFER );
const attributeData = this.get( attribute );
transformBuffers.push( attributeData );
}
//
this.set( computePipeline, {
programGPU,
transformBuffers,
attributes
} );
}
/**
* Creates bindings from the given bind group definition.
*
* @param {BindGroup} bindGroup - The bind group.
* @param {Array<BindGroup>} bindings - Array of bind groups.
* @param {Number} cacheIndex - The cache index.
* @param {Number} version - The version.
*/
createBindings( bindGroup, bindings /*, cacheIndex, version*/ ) {
if ( this._knownBindings.has( bindings ) === false ) {
this._knownBindings.add( bindings );
let uniformBuffers = 0;
let textures = 0;
for ( const bindGroup of bindings ) {
this.set( bindGroup, {
textures: textures,
uniformBuffers: uniformBuffers
} );
for ( const binding of bindGroup.bindings ) {
if ( binding.isUniformBuffer ) uniformBuffers ++;
if ( binding.isSampledTexture ) textures ++;
}
}
}
this.updateBindings( bindGroup, bindings );
}
/**
* Updates the given bind group definition.
*
* @param {BindGroup} bindGroup - The bind group.
* @param {Array<BindGroup>} bindings - Array of bind groups.
* @param {Number} cacheIndex - The cache index.
* @param {Number} version - The version.
*/
updateBindings( bindGroup /*, bindings, cacheIndex, version*/ ) {
const { gl } = this;
const bindGroupData = this.get( bindGroup );
let i = bindGroupData.uniformBuffers;
let t = bindGroupData.textures;
for ( const binding of bindGroup.bindings ) {
if ( binding.isUniformsGroup || binding.isUniformBuffer ) {
const data = binding.buffer;
const bufferGPU = gl.createBuffer();
gl.bindBuffer( gl.UNIFORM_BUFFER, bufferGPU );
gl.bufferData( gl.UNIFORM_BUFFER, data, gl.DYNAMIC_DRAW );
this.set( binding, {
index: i ++,
bufferGPU
} );
} else if ( binding.isSampledTexture ) {
const { textureGPU, glTextureType } = this.get( binding.texture );
this.set( binding, {
index: t ++,
textureGPU,
glTextureType
} );
}
}
}
/**
* Updates a buffer binding.
*
* @param {Buffer} binding - The buffer binding to update.
*/
updateBinding( binding ) {
const gl = this.gl;
if ( binding.isUniformsGroup || binding.isUniformBuffer ) {
const bindingData = this.get( binding );
const bufferGPU = bindingData.bufferGPU;
const data = binding.buffer;
gl.bindBuffer( gl.UNIFORM_BUFFER, bufferGPU );
gl.bufferData( gl.UNIFORM_BUFFER, data, gl.DYNAMIC_DRAW );
}
}
// attributes
/**
* Creates the GPU buffer of an indexed shader attribute.
*
* @param {BufferAttribute} attribute - The indexed buffer attribute.
*/
createIndexAttribute( attribute ) {
const gl = this.gl;
this.attributeUtils.createAttribute( attribute, gl.ELEMENT_ARRAY_BUFFER );
}
/**
* Creates the GPU buffer of a shader attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
*/
createAttribute( attribute ) {
if ( this.has( attribute ) ) return;
const gl = this.gl;
this.attributeUtils.createAttribute( attribute, gl.ARRAY_BUFFER );
}
/**
* Creates the GPU buffer of a storage attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
*/
createStorageAttribute( attribute ) {
if ( this.has( attribute ) ) return;
const gl = this.gl;
this.attributeUtils.createAttribute( attribute, gl.ARRAY_BUFFER );
}
/**
* Updates the GPU buffer of a shader attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute to update.
*/
updateAttribute( attribute ) {
this.attributeUtils.updateAttribute( attribute );
}
/**
* Destroys the GPU buffer of a shader attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute to destroy.
*/
destroyAttribute( attribute ) {
this.attributeUtils.destroyAttribute( attribute );
}
/**
* Checks if the given feature is supported by the backend.
*
* @param {String} name - The feature's name.
* @return {Boolean} Whether the feature is supported or not.
*/
hasFeature( name ) {
const keysMatching = Object.keys( GLFeatureName ).filter( key => GLFeatureName[ key ] === name );
const extensions = this.extensions;
for ( let i = 0; i < keysMatching.length; i ++ ) {
if ( extensions.has( keysMatching[ i ] ) ) return true;
}
return false;
}
/**
* Returns the maximum anisotropy texture filtering value.
*
* @return {Number} The maximum anisotropy texture filtering value.
*/
getMaxAnisotropy() {
return this.capabilities.getMaxAnisotropy();
}
/**
* Copies data of the given source texture to the given destination texture.
*
* @param {Texture} srcTexture - The source texture.
* @param {Texture} dstTexture - The destination texture.
* @param {Vector4?} [srcRegion=null] - The region of the source texture to copy.
* @param {(Vector2|Vector3)?} [dstPosition=null] - The destination position of the copy.
* @param {Number} [level=0] - The mip level to copy.
*/
copyTextureToTexture( srcTexture, dstTexture, srcRegion = null, dstPosition = null, level = 0 ) {
this.textureUtils.copyTextureToTexture( srcTexture, dstTexture, srcRegion, dstPosition, level );
}
/**
* Copies the current bound framebuffer to the given texture.
*
* @param {Texture} texture - The destination texture.
* @param {RenderContext} renderContext - The render context.
* @param {Vector4} rectangle - A four dimensional vector defining the origin and dimension of the copy.
*/
copyFramebufferToTexture( texture, renderContext, rectangle ) {
this.textureUtils.copyFramebufferToTexture( texture, renderContext, rectangle );
}
/**
 * Configures the active framebuffer from the given render context.
 *
 * Framebuffers are created lazily and cached per render target (keyed by a
 * descriptor-derived cache key; cube render targets use a separate cache).
 * When MSAA is requested and multisampled-render-to-texture is unavailable,
 * rendering goes into a separate multisampled framebuffer backed by
 * renderbuffers. Passing a descriptor with `textures === null` binds the
 * default framebuffer.
 *
 * @private
 * @param {RenderContext} descriptor - The render context.
 */
_setFramebuffer( descriptor ) {
const { gl, state } = this;
let currentFrameBuffer = null;
if ( descriptor.textures !== null ) {
const renderTarget = descriptor.renderTarget;
const renderTargetContextData = this.get( renderTarget );
const { samples, depthBuffer, stencilBuffer } = renderTarget;
// Classify the render target; these flags select attachment strategy below.
const isCube = renderTarget.isWebGLCubeRenderTarget === true;
const isRenderTarget3D = renderTarget.isRenderTarget3D === true;
const isRenderTargetArray = renderTarget.isRenderTargetArray === true;
const isXRRenderTarget = renderTarget.isXRRenderTarget === true;
const hasExternalTextures = ( isXRRenderTarget === true && renderTarget.hasExternalTextures === true );
let msaaFb = renderTargetContextData.msaaFrameBuffer;
let depthRenderbuffer = renderTargetContextData.depthRenderbuffer;
const multisampledRTTExt = this.extensions.get( 'WEBGL_multisampled_render_to_texture' );
const useMultisampledRTT = this._useMultisampledRTT( renderTarget );
const cacheKey = getCacheKey( descriptor );
// Look up a cached framebuffer for this configuration.
let fb;
if ( isCube ) {
renderTargetContextData.cubeFramebuffers || ( renderTargetContextData.cubeFramebuffers = {} );
fb = renderTargetContextData.cubeFramebuffers[ cacheKey ];
} else if ( isXRRenderTarget && hasExternalTextures === false ) {
// NOTE(review): property name reads like a typo for `_xrFramebuffer`;
// it must match wherever the property is assigned — verify elsewhere in the file.
fb = this._xrFamebuffer;
} else {
renderTargetContextData.framebuffers || ( renderTargetContextData.framebuffers = {} );
fb = renderTargetContextData.framebuffers[ cacheKey ];
}
// Cache miss: create the framebuffer and attach all color/depth targets.
if ( fb === undefined ) {
fb = gl.createFramebuffer();
state.bindFramebuffer( gl.FRAMEBUFFER, fb );
const textures = descriptor.textures;
if ( isCube ) {
renderTargetContextData.cubeFramebuffers[ cacheKey ] = fb;
const { textureGPU } = this.get( textures[ 0 ] );
const cubeFace = this.renderer._activeCubeFace;
gl.framebufferTexture2D( gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_CUBE_MAP_POSITIVE_X + cubeFace, textureGPU, 0 );
} else {
renderTargetContextData.framebuffers[ cacheKey ] = fb;
// One color attachment per texture (MRT).
for ( let i = 0; i < textures.length; i ++ ) {
const texture = textures[ i ];
const textureData = this.get( texture );
textureData.renderTarget = descriptor.renderTarget;
textureData.cacheKey = cacheKey; // required for copyTextureToTexture()
const attachment = gl.COLOR_ATTACHMENT0 + i;
if ( isRenderTarget3D || isRenderTargetArray ) {
const layer = this.renderer._activeCubeFace;
gl.framebufferTextureLayer( gl.FRAMEBUFFER, attachment, textureData.textureGPU, 0, layer );
} else {
if ( useMultisampledRTT ) {
multisampledRTTExt.framebufferTexture2DMultisampleEXT( gl.FRAMEBUFFER, attachment, gl.TEXTURE_2D, textureData.textureGPU, 0, samples );
} else {
gl.framebufferTexture2D( gl.FRAMEBUFFER, attachment, gl.TEXTURE_2D, textureData.textureGPU, 0 );
}
}
}
state.drawBuffers( descriptor, fb );
}
// Depth attachment: either an auto-allocated XR renderbuffer or the
// descriptor's depth texture.
if ( renderTarget.isXRRenderTarget && renderTarget.autoAllocateDepthBuffer === true ) {
const renderbuffer = gl.createRenderbuffer();
this.textureUtils.setupRenderBufferStorage( renderbuffer, descriptor, 0, useMultisampledRTT );
renderTargetContextData.xrDepthRenderbuffer = renderbuffer;
} else {
if ( descriptor.depthTexture !== null ) {
const textureData = this.get( descriptor.depthTexture );
const depthStyle = stencilBuffer ? gl.DEPTH_STENCIL_ATTACHMENT : gl.DEPTH_ATTACHMENT;
textureData.renderTarget = descriptor.renderTarget;
textureData.cacheKey = cacheKey; // required for copyTextureToTexture()
if ( useMultisampledRTT ) {
multisampledRTTExt.framebufferTexture2DMultisampleEXT( gl.FRAMEBUFFER, depthStyle, gl.TEXTURE_2D, textureData.textureGPU, 0, samples );
} else {
gl.framebufferTexture2D( gl.FRAMEBUFFER, depthStyle, gl.TEXTURE_2D, textureData.textureGPU, 0 );
}
}
}
} else {
// Cache hit with XR-provided textures: the external texture objects may
// have changed, so re-attach color and depth every time.
if ( isXRRenderTarget && hasExternalTextures ) {
state.bindFramebuffer( gl.FRAMEBUFFER, fb );
// rebind color
const textureData = this.get( descriptor.textures[ 0 ] );
if ( useMultisampledRTT ) {
multisampledRTTExt.framebufferTexture2DMultisampleEXT( gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, textureData.textureGPU, 0, samples );
} else {
gl.framebufferTexture2D( gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, textureData.textureGPU, 0 );
}
// rebind depth
const depthStyle = stencilBuffer ? gl.DEPTH_STENCIL_ATTACHMENT : gl.DEPTH_ATTACHMENT;
if ( renderTarget.autoAllocateDepthBuffer === true ) {
const renderbuffer = renderTargetContextData.xrDepthRenderbuffer;
gl.bindRenderbuffer( gl.RENDERBUFFER, renderbuffer );
gl.framebufferRenderbuffer( gl.FRAMEBUFFER, depthStyle, gl.RENDERBUFFER, renderbuffer );
} else {
const textureData = this.get( descriptor.depthTexture );
if ( useMultisampledRTT ) {
multisampledRTTExt.framebufferTexture2DMultisampleEXT( gl.FRAMEBUFFER, depthStyle, gl.TEXTURE_2D, textureData.textureGPU, 0, samples );
} else {
gl.framebufferTexture2D( gl.FRAMEBUFFER, depthStyle, gl.TEXTURE_2D, textureData.textureGPU, 0 );
}
}
}
}
// MSAA without multisampled-RTT: render into a dedicated multisampled
// framebuffer backed by renderbuffers (resolved elsewhere).
if ( samples > 0 && useMultisampledRTT === false ) {
if ( msaaFb === undefined ) {
const invalidationArray = [];
msaaFb = gl.createFramebuffer();
state.bindFramebuffer( gl.FRAMEBUFFER, msaaFb );
const msaaRenderbuffers = [];
const textures = descriptor.textures;
for ( let i = 0; i < textures.length; i ++ ) {
msaaRenderbuffers[ i ] = gl.createRenderbuffer();
gl.bindRenderbuffer( gl.RENDERBUFFER, msaaRenderbuffers[ i ] );
invalidationArray.push( gl.COLOR_ATTACHMENT0 + i );
if ( depthBuffer ) {
const depthStyle = stencilBuffer ? gl.DEPTH_STENCIL_ATTACHMENT : gl.DEPTH_ATTACHMENT;
invalidationArray.push( depthStyle );
}
const texture = descriptor.textures[ i ];
const textureData = this.get( texture );
gl.renderbufferStorageMultisample( gl.RENDERBUFFER, samples, textureData.glInternalFormat, descriptor.width, descriptor.height );
gl.framebufferRenderbuffer( gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + i, gl.RENDERBUFFER, msaaRenderbuffers[ i ] );
}
renderTargetContextData.msaaFrameBuffer = msaaFb;
renderTargetContextData.msaaRenderbuffers = msaaRenderbuffers;
if ( depthRenderbuffer === undefined ) {
depthRenderbuffer = gl.createRenderbuffer();
this.textureUtils.setupRenderBufferStorage( depthRenderbuffer, descriptor, samples );
renderTargetContextData.depthRenderbuffer = depthRenderbuffer;
const depthStyle = stencilBuffer ? gl.DEPTH_STENCIL_ATTACHMENT : gl.DEPTH_ATTACHMENT;
invalidationArray.push( depthStyle );
}
renderTargetContextData.invalidationArray = invalidationArray;
}
currentFrameBuffer = renderTargetContextData.msaaFrameBuffer;
} else {
currentFrameBuffer = fb;
}
}
state.bindFramebuffer( gl.FRAMEBUFFER, currentFrameBuffer );
}
/**
* Computes the VAO key for the given index and attributes.
*
* @private
* @param {BufferAttribute?} index - The index. `null` for non-indexed geometries.
* @param {Array<BufferAttribute>} attributes - An array of buffer attributes.
* @return {String} The VAO key.
*/
_getVaoKey( index, attributes ) {
let key = '';
if ( index !== null ) {
const indexData = this.get( index );
key += ':' + indexData.id;
}
for ( let i = 0; i < attributes.length; i ++ ) {
const attributeData = this.get( attributes[ i ] );
key += ':' + attributeData.id;
}
return key;
}
/**
 * Creates a VAO from the index and attributes, caches it under a key built
 * from the backend data ids, and reports whether the VAO is static (i.e.
 * contains no storage attributes).
 *
 * @private
 * @param {BufferAttribute?} index - The index. `null` for non-indexed geometries.
 * @param {Array<BufferAttribute>} attributes - An array of buffer attributes.
 * @return {Object} The VAO data (`{ vaoGPU, staticVao }`).
 */
_createVao( index, attributes ) {
const { gl } = this;
const vaoGPU = gl.createVertexArray();
let key = '';
let staticVao = true;
gl.bindVertexArray( vaoGPU );
// The element array buffer binding is recorded in the VAO state.
if ( index !== null ) {
const indexData = this.get( index );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, indexData.bufferGPU );
key += ':' + indexData.id;
}
for ( let i = 0; i < attributes.length; i ++ ) {
const attribute = attributes[ i ];
const attributeData = this.get( attribute );
key += ':' + attributeData.id;
gl.bindBuffer( gl.ARRAY_BUFFER, attributeData.bufferGPU );
gl.enableVertexAttribArray( i );
// Storage attributes can be rewritten by compute, so the VAO can't be
// treated as static.
if ( attribute.isStorageBufferAttribute || attribute.isStorageInstancedBufferAttribute ) staticVao = false;
// Interleaved attributes need an explicit stride/offset (in bytes);
// tightly packed attributes use 0/0.
let stride, offset;
if ( attribute.isInterleavedBufferAttribute === true ) {
stride = attribute.data.stride * attributeData.bytesPerElement;
offset = attribute.offset * attributeData.bytesPerElement;
} else {
stride = 0;
offset = 0;
}
// Integer attributes must use the non-normalizing integer pointer API.
if ( attributeData.isInteger ) {
gl.vertexAttribIPointer( i, attribute.itemSize, attributeData.type, stride, offset );
} else {
gl.vertexAttribPointer( i, attribute.itemSize, attributeData.type, attribute.normalized, stride, offset );
}
// Per-instance advancement for instanced attributes.
if ( attribute.isInstancedBufferAttribute && ! attribute.isInterleavedBufferAttribute ) {
gl.vertexAttribDivisor( i, attribute.meshPerAttribute );
} else if ( attribute.isInterleavedBufferAttribute && attribute.data.isInstancedInterleavedBuffer ) {
gl.vertexAttribDivisor( i, attribute.data.meshPerAttribute );
}
}
gl.bindBuffer( gl.ARRAY_BUFFER, null );
this.vaoCache[ key ] = vaoGPU;
return { vaoGPU, staticVao };
}
/**
* Creates a transform feedback from the given transform buffers.
*
* @private
* @param {Array<DualAttributeData>} transformBuffers - The transform buffers.
* @return {WebGLTransformFeedback} The transform feedback.
*/
_getTransformFeedback( transformBuffers ) {
let key = '';
for ( let i = 0; i < transformBuffers.length; i ++ ) {
key += ':' + transformBuffers[ i ].id;
}
let transformFeedbackGPU = this.transformFeedbackCache[ key ];
if ( transformFeedbackGPU !== undefined ) {
return transformFeedbackGPU;
}
const { gl } = this;
transformFeedbackGPU = gl.createTransformFeedback();
gl.bindTransformFeedback( gl.TRANSFORM_FEEDBACK, transformFeedbackGPU );
for ( let i = 0; i < transformBuffers.length; i ++ ) {
const attributeData = transformBuffers[ i ];
gl.bindBufferBase( gl.TRANSFORM_FEEDBACK_BUFFER, i, attributeData.transformBuffer );
}
gl.bindTransformFeedback( gl.TRANSFORM_FEEDBACK, null );
this.transformFeedbackCache[ key ] = transformFeedbackGPU;
return transformFeedbackGPU;
}
/**
* Setups the given bindings.
*
* @private
* @param {Array<BindGroup>} bindings - The bindings.
* @param {WebGLProgram} programGPU - The WebGL program.
*/
_setupBindings( bindings, programGPU ) {
const gl = this.gl;
for ( const bindGroup of bindings ) {
for ( const binding of bindGroup.bindings ) {
const bindingData = this.get( binding );
const index = bindingData.index;
if ( binding.isUniformsGroup || binding.isUniformBuffer ) {
const location = gl.getUniformBlockIndex( programGPU, binding.name );
gl.uniformBlockBinding( programGPU, location, index );
} else if ( binding.isSampledTexture ) {
const location = gl.getUniformLocation( programGPU, binding.name );
gl.uniform1i( location, index );
}
}
}
}
/**
* Binds the given uniforms.
*
* @private
* @param {Array<BindGroup>} bindings - The bindings.
*/
_bindUniforms( bindings ) {
const { gl, state } = this;
for ( const bindGroup of bindings ) {
for ( const binding of bindGroup.bindings ) {
const bindingData = this.get( binding );
const index = bindingData.index;
if ( binding.isUniformsGroup || binding.isUniformBuffer ) {
// TODO USE bindBufferRange to group multiple uniform buffers
state.bindBufferBase( gl.UNIFORM_BUFFER, index, bindingData.bufferGPU );
} else if ( binding.isSampledTexture ) {
state.bindTexture( bindingData.glTextureType, bindingData.textureGPU, gl.TEXTURE0 + index );
}
}
}
}
/**
* Returns `true` if the `WEBGL_multisampled_render_to_texture` extension
* should be used when MSAA is enabled.
*
* @private
* @param {RenderTarget} renderTarget - The render target that should be multisampled.
* @return {Boolean} Whether to use the `WEBGL_multisampled_render_to_texture` extension for MSAA or not.
*/
_useMultisampledRTT( renderTarget ) {
return renderTarget.samples > 0 && this.extensions.has( 'WEBGL_multisampled_render_to_texture' ) === true && renderTarget.autoAllocateDepthBuffer !== false;
}
/**
* Frees internal resources.
*/
dispose() {
const extension = this.extensions.get( 'WEBGL_lose_context' );
if ( extension ) extension.loseContext();
this.renderer.domElement.removeEventListener( 'webglcontextlost', this._onContextLost );
}
}
// The following objects map enum-style names to the string constants defined
// by the WebGPU API (used by the WebGPU backend when building pipelines).

// WebGPU primitive topologies (GPUPrimitiveTopology).
const GPUPrimitiveTopology = {
PointList: 'point-list',
LineList: 'line-list',
LineStrip: 'line-strip',
TriangleList: 'triangle-list',
TriangleStrip: 'triangle-strip',
};
// WebGPU depth/stencil comparison functions (GPUCompareFunction).
const GPUCompareFunction = {
Never: 'never',
Less: 'less',
Equal: 'equal',
LessEqual: 'less-equal',
Greater: 'greater',
NotEqual: 'not-equal',
GreaterEqual: 'greater-equal',
Always: 'always'
};
// WebGPU render pass store operations (GPUStoreOp).
const GPUStoreOp = {
Store: 'store',
Discard: 'discard'
};
// WebGPU render pass load operations (GPULoadOp).
const GPULoadOp = {
Load: 'load',
Clear: 'clear'
};
// WebGPU front-face winding orders (GPUFrontFace).
const GPUFrontFace = {
CCW: 'ccw',
CW: 'cw'
};
// WebGPU face culling modes (GPUCullMode).
const GPUCullMode = {
None: 'none',
Front: 'front',
Back: 'back'
};
// WebGPU index buffer formats (GPUIndexFormat).
const GPUIndexFormat = {
Uint16: 'uint16',
Uint32: 'uint32'
};
// WebGPU texture formats (GPUTextureFormat). Keys are enum-style names,
// values are the string constants defined by the WebGPU specification.
const GPUTextureFormat = {
	// 8-bit formats
	R8Unorm: 'r8unorm',
	R8Snorm: 'r8snorm',
	R8Uint: 'r8uint',
	R8Sint: 'r8sint',
	// 16-bit formats
	R16Uint: 'r16uint',
	R16Sint: 'r16sint',
	R16Float: 'r16float',
	RG8Unorm: 'rg8unorm',
	RG8Snorm: 'rg8snorm',
	RG8Uint: 'rg8uint',
	RG8Sint: 'rg8sint',
	// 32-bit formats
	R32Uint: 'r32uint',
	R32Sint: 'r32sint',
	R32Float: 'r32float',
	RG16Uint: 'rg16uint',
	RG16Sint: 'rg16sint',
	RG16Float: 'rg16float',
	RGBA8Unorm: 'rgba8unorm',
	RGBA8UnormSRGB: 'rgba8unorm-srgb',
	RGBA8Snorm: 'rgba8snorm',
	RGBA8Uint: 'rgba8uint',
	RGBA8Sint: 'rgba8sint',
	BGRA8Unorm: 'bgra8unorm',
	BGRA8UnormSRGB: 'bgra8unorm-srgb',
	// Packed 32-bit formats
	RGB9E5UFloat: 'rgb9e5ufloat',
	RGB10A2Unorm: 'rgb10a2unorm',
	// Fixed: previously duplicated 'rgb10a2unorm' (copy-paste error);
	// the WebGPU spec value for this format is 'rg11b10ufloat'.
	RG11B10uFloat: 'rg11b10ufloat',
	// 64-bit formats
	RG32Uint: 'rg32uint',
	RG32Sint: 'rg32sint',
	RG32Float: 'rg32float',
	RGBA16Uint: 'rgba16uint',
	RGBA16Sint: 'rgba16sint',
	RGBA16Float: 'rgba16float',
	// 128-bit formats
	RGBA32Uint: 'rgba32uint',
	RGBA32Sint: 'rgba32sint',
	RGBA32Float: 'rgba32float',
	// Depth and stencil formats
	Stencil8: 'stencil8',
	Depth16Unorm: 'depth16unorm',
	Depth24Plus: 'depth24plus',
	Depth24PlusStencil8: 'depth24plus-stencil8',
	Depth32Float: 'depth32float',
	// 'depth32float-stencil8' extension
	Depth32FloatStencil8: 'depth32float-stencil8',
	// BC compressed formats usable if 'texture-compression-bc' is both
	// supported by the device/user agent and enabled in requestDevice.
	BC1RGBAUnorm: 'bc1-rgba-unorm',
	BC1RGBAUnormSRGB: 'bc1-rgba-unorm-srgb',
	BC2RGBAUnorm: 'bc2-rgba-unorm',
	BC2RGBAUnormSRGB: 'bc2-rgba-unorm-srgb',
	BC3RGBAUnorm: 'bc3-rgba-unorm',
	BC3RGBAUnormSRGB: 'bc3-rgba-unorm-srgb',
	BC4RUnorm: 'bc4-r-unorm',
	BC4RSnorm: 'bc4-r-snorm',
	BC5RGUnorm: 'bc5-rg-unorm',
	BC5RGSnorm: 'bc5-rg-snorm',
	BC6HRGBUFloat: 'bc6h-rgb-ufloat',
	BC6HRGBFloat: 'bc6h-rgb-float',
	BC7RGBAUnorm: 'bc7-rgba-unorm',
	// Fixed: previously 'bc7-rgba-srgb'; the WebGPU spec value is
	// 'bc7-rgba-unorm-srgb'.
	BC7RGBAUnormSRGB: 'bc7-rgba-unorm-srgb',
	// ETC2 compressed formats usable if 'texture-compression-etc2' is both
	// supported by the device/user agent and enabled in requestDevice.
	ETC2RGB8Unorm: 'etc2-rgb8unorm',
	ETC2RGB8UnormSRGB: 'etc2-rgb8unorm-srgb',
	ETC2RGB8A1Unorm: 'etc2-rgb8a1unorm',
	ETC2RGB8A1UnormSRGB: 'etc2-rgb8a1unorm-srgb',
	ETC2RGBA8Unorm: 'etc2-rgba8unorm',
	ETC2RGBA8UnormSRGB: 'etc2-rgba8unorm-srgb',
	EACR11Unorm: 'eac-r11unorm',
	EACR11Snorm: 'eac-r11snorm',
	EACRG11Unorm: 'eac-rg11unorm',
	EACRG11Snorm: 'eac-rg11snorm',
	// ASTC compressed formats usable if 'texture-compression-astc' is both
	// supported by the device/user agent and enabled in requestDevice.
	ASTC4x4Unorm: 'astc-4x4-unorm',
	ASTC4x4UnormSRGB: 'astc-4x4-unorm-srgb',
	ASTC5x4Unorm: 'astc-5x4-unorm',
	ASTC5x4UnormSRGB: 'astc-5x4-unorm-srgb',
	ASTC5x5Unorm: 'astc-5x5-unorm',
	ASTC5x5UnormSRGB: 'astc-5x5-unorm-srgb',
	ASTC6x5Unorm: 'astc-6x5-unorm',
	ASTC6x5UnormSRGB: 'astc-6x5-unorm-srgb',
	ASTC6x6Unorm: 'astc-6x6-unorm',
	ASTC6x6UnormSRGB: 'astc-6x6-unorm-srgb',
	ASTC8x5Unorm: 'astc-8x5-unorm',
	ASTC8x5UnormSRGB: 'astc-8x5-unorm-srgb',
	ASTC8x6Unorm: 'astc-8x6-unorm',
	ASTC8x6UnormSRGB: 'astc-8x6-unorm-srgb',
	ASTC8x8Unorm: 'astc-8x8-unorm',
	ASTC8x8UnormSRGB: 'astc-8x8-unorm-srgb',
	ASTC10x5Unorm: 'astc-10x5-unorm',
	ASTC10x5UnormSRGB: 'astc-10x5-unorm-srgb',
	ASTC10x6Unorm: 'astc-10x6-unorm',
	ASTC10x6UnormSRGB: 'astc-10x6-unorm-srgb',
	ASTC10x8Unorm: 'astc-10x8-unorm',
	ASTC10x8UnormSRGB: 'astc-10x8-unorm-srgb',
	ASTC10x10Unorm: 'astc-10x10-unorm',
	ASTC10x10UnormSRGB: 'astc-10x10-unorm-srgb',
	ASTC12x10Unorm: 'astc-12x10-unorm',
	ASTC12x10UnormSRGB: 'astc-12x10-unorm-srgb',
	ASTC12x12Unorm: 'astc-12x12-unorm',
	ASTC12x12UnormSRGB: 'astc-12x12-unorm-srgb',
};
// WebGPU sampler address modes (GPUAddressMode).
const GPUAddressMode = {
ClampToEdge: 'clamp-to-edge',
Repeat: 'repeat',
MirrorRepeat: 'mirror-repeat'
};
// WebGPU sampler filter modes (GPUFilterMode).
const GPUFilterMode = {
Linear: 'linear',
Nearest: 'nearest'
};
// WebGPU blend factors (GPUBlendFactor).
// NOTE(review): the key name `OneMinusDstColor` is inconsistent with `Dst`
// above, but its value matches the WebGPU 'one-minus-dst' factor; renaming
// the key could break references elsewhere — verify before changing.
const GPUBlendFactor = {
Zero: 'zero',
One: 'one',
Src: 'src',
OneMinusSrc: 'one-minus-src',
SrcAlpha: 'src-alpha',
OneMinusSrcAlpha: 'one-minus-src-alpha',
Dst: 'dst',
OneMinusDstColor: 'one-minus-dst',
DstAlpha: 'dst-alpha',
OneMinusDstAlpha: 'one-minus-dst-alpha',
SrcAlphaSaturated: 'src-alpha-saturated',
Constant: 'constant',
OneMinusConstant: 'one-minus-constant'
};
// WebGPU blend operations (GPUBlendOperation).
const GPUBlendOperation = {
Add: 'add',
Subtract: 'subtract',
ReverseSubtract: 'reverse-subtract',
Min: 'min',
Max: 'max'
};
// WebGPU color write mask bit flags (GPUColorWrite).
const GPUColorWriteFlags = {
None: 0,
Red: 0x1,
Green: 0x2,
Blue: 0x4,
Alpha: 0x8,
All: 0xF
};
// WebGPU stencil operations (GPUStencilOperation).
const GPUStencilOperation = {
Keep: 'keep',
Zero: 'zero',
Replace: 'replace',
Invert: 'invert',
IncrementClamp: 'increment-clamp',
DecrementClamp: 'decrement-clamp',
IncrementWrap: 'increment-wrap',
DecrementWrap: 'decrement-wrap'
};
// WebGPU buffer binding types (GPUBufferBindingType).
const GPUBufferBindingType = {
Uniform: 'uniform',
Storage: 'storage',
ReadOnlyStorage: 'read-only-storage'
};
// WebGPU storage texture access modes (GPUStorageTextureAccess).
const GPUStorageTextureAccess = {
WriteOnly: 'write-only',
ReadOnly: 'read-only',
ReadWrite: 'read-write',
};
// WebGPU texture sample types (GPUTextureSampleType).
const GPUTextureSampleType = {
Float: 'float',
UnfilterableFloat: 'unfilterable-float',
Depth: 'depth',
SInt: 'sint',
UInt: 'uint'
};
// WebGPU texture dimensions (GPUTextureDimension).
const GPUTextureDimension = {
OneD: '1d',
TwoD: '2d',
ThreeD: '3d'
};
// WebGPU texture view dimensions (GPUTextureViewDimension).
const GPUTextureViewDimension = {
OneD: '1d',
TwoD: '2d',
TwoDArray: '2d-array',
Cube: 'cube',
CubeArray: 'cube-array',
ThreeD: '3d'
};
// WebGPU texture aspects (GPUTextureAspect).
const GPUTextureAspect = {
All: 'all',
StencilOnly: 'stencil-only',
DepthOnly: 'depth-only'
};
// WebGPU vertex buffer step modes (GPUVertexStepMode).
const GPUInputStepMode = {
Vertex: 'vertex',
Instance: 'instance'
};
// WebGPU optional feature names (GPUFeatureName).
const GPUFeatureName = {
DepthClipControl: 'depth-clip-control',
Depth32FloatStencil8: 'depth32float-stencil8',
TextureCompressionBC: 'texture-compression-bc',
TextureCompressionETC2: 'texture-compression-etc2',
TextureCompressionASTC: 'texture-compression-astc',
TimestampQuery: 'timestamp-query',
IndirectFirstInstance: 'indirect-first-instance',
ShaderF16: 'shader-f16',
RG11B10UFloat: 'rg11b10ufloat-renderable',
BGRA8UNormStorage: 'bgra8unorm-storage',
Float32Filterable: 'float32-filterable',
ClipDistances: 'clip-distances',
DualSourceBlending: 'dual-source-blending',
Subgroups: 'subgroups'
};
/**
 * Represents a sampler binding type.
 *
 * @private
 * @augments Binding
 */
class Sampler extends Binding {

	/**
	 * Constructs a new sampler.
	 *
	 * @param {String} name - The sampler's name.
	 * @param {Texture?} texture - The texture this binding is referring to.
	 */
	constructor( name, texture ) {

		super( name );

		/**
		 * The texture the sampler is referring to.
		 *
		 * @type {Texture?}
		 */
		this.texture = texture;

		/**
		 * The binding's version, initialized from the texture's
		 * version (or 0 when no texture is given).
		 *
		 * @type {Number}
		 */
		this.version = texture ? texture.version : 0;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isSampler = true;

	}

}
/**
 * A special form of sampler binding type whose texture value is
 * managed by a node object.
 *
 * @private
 * @augments Sampler
 */
class NodeSampler extends Sampler {

	/**
	 * Constructs a new node-based sampler.
	 *
	 * @param {String} name - The sampler's name.
	 * @param {TextureNode} textureNode - The texture node.
	 * @param {UniformGroupNode} groupNode - The uniform group node.
	 */
	constructor( name, textureNode, groupNode ) {

		super( name, textureNode ? textureNode.value : null );

		/**
		 * The texture node.
		 *
		 * @type {TextureNode}
		 */
		this.textureNode = textureNode;

		/**
		 * The uniform group node.
		 *
		 * @type {UniformGroupNode}
		 */
		this.groupNode = groupNode;

	}

	/**
	 * Updates the texture value of this sampler from its texture node.
	 */
	update() {

		this.texture = this.textureNode.value;

	}

}
/**
 * Represents a storage buffer binding type.
 *
 * @private
 * @augments Buffer
 */
class StorageBuffer extends Buffer {

	/**
	 * Constructs a new storage buffer.
	 *
	 * @param {String} name - The buffer's name.
	 * @param {BufferAttribute} attribute - The buffer attribute.
	 */
	constructor( name, attribute ) {

		super( name, attribute ? attribute.array : null );

		/**
		 * The buffer attribute backing this storage buffer.
		 *
		 * @type {BufferAttribute}
		 */
		this.attribute = attribute;

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isStorageBuffer = true;

	}

}
// Running counter used to generate unique names for node-based storage buffers.
let _id = 0;
/**
 * A special form of storage buffer binding type whose buffer value is
 * managed by a node object.
 *
 * @private
 * @augments StorageBuffer
 */
class NodeStorageBuffer extends StorageBuffer {

	/**
	 * Constructs a new node-based storage buffer.
	 *
	 * @param {StorageBufferNode} nodeUniform - The storage buffer node.
	 * @param {UniformGroupNode} groupNode - The uniform group node.
	 */
	constructor( nodeUniform, groupNode ) {

		// Generate a unique name per instance.
		super( 'StorageBuffer_' + _id ++, nodeUniform ? nodeUniform.value : null );

		/**
		 * The node uniform.
		 *
		 * @type {StorageBufferNode}
		 */
		this.nodeUniform = nodeUniform;

		/**
		 * The access type, taken from the node (read/write by default).
		 *
		 * @type {String}
		 */
		this.access = nodeUniform ? nodeUniform.access : NodeAccess.READ_WRITE;

		/**
		 * The uniform group node.
		 *
		 * @type {UniformGroupNode}
		 */
		this.groupNode = groupNode;

	}

	/**
	 * The storage buffer, always read live from the node uniform.
	 *
	 * @type {BufferAttribute}
	 */
	get buffer() {

		return this.nodeUniform.value;

	}

}
/**
* A WebGPU backend utility module used by {@link WebGPUTextureUtils}.
*
* Provides GPU render passes for mipmap generation and for flipping a
* texture along its vertical axis.
*
* @private
*/
class WebGPUTexturePassUtils extends DataMap {
/**
* Constructs a new utility object.
*
* @param {GPUDevice} device - The WebGPU device.
*/
constructor( device ) {
super();
/**
* The WebGPU device.
*
* @type {GPUDevice}
*/
this.device = device;
// Fullscreen-quad vertex shader: emits a 4-vertex triangle strip with UVs,
// driven purely by the vertex index (no vertex buffers needed).
const mipmapVertexSource = `
struct VarysStruct {
@builtin( position ) Position: vec4<f32>,
@location( 0 ) vTex : vec2<f32>
};
@vertex
fn main( @builtin( vertex_index ) vertexIndex : u32 ) -> VarysStruct {
var Varys : VarysStruct;
var pos = array< vec2<f32>, 4 >(
vec2<f32>( -1.0, 1.0 ),
vec2<f32>( 1.0, 1.0 ),
vec2<f32>( -1.0, -1.0 ),
vec2<f32>( 1.0, -1.0 )
);
var tex = array< vec2<f32>, 4 >(
vec2<f32>( 0.0, 0.0 ),
vec2<f32>( 1.0, 0.0 ),
vec2<f32>( 0.0, 1.0 ),
vec2<f32>( 1.0, 1.0 )
);
Varys.vTex = tex[ vertexIndex ];
Varys.Position = vec4<f32>( pos[ vertexIndex ], 0.0, 1.0 );
return Varys;
}
`;
// Fragment shader for mipmap generation: plain resample of the bound source level.
const mipmapFragmentSource = `
@group( 0 ) @binding( 0 )
var imgSampler : sampler;
@group( 0 ) @binding( 1 )
var img : texture_2d<f32>;
@fragment
fn main( @location( 0 ) vTex : vec2<f32> ) -> @location( 0 ) vec4<f32> {
return textureSample( img, imgSampler, vTex );
}
`;
// Fragment shader for the flipY pass: samples with an inverted v coordinate.
const flipYFragmentSource = `
@group( 0 ) @binding( 0 )
var imgSampler : sampler;
@group( 0 ) @binding( 1 )
var img : texture_2d<f32>;
@fragment
fn main( @location( 0 ) vTex : vec2<f32> ) -> @location( 0 ) vec4<f32> {
return textureSample( img, imgSampler, vec2( vTex.x, 1.0 - vTex.y ) );
}
`;
/**
* The mipmap GPU sampler.
*
* @type {GPUSampler}
*/
this.mipmapSampler = device.createSampler( { minFilter: GPUFilterMode.Linear } );
/**
* The flipY GPU sampler.
*
* @type {GPUSampler}
*/
this.flipYSampler = device.createSampler( { minFilter: GPUFilterMode.Nearest } ); //@TODO?: Consider using textureLoad()
/**
* A cache for GPU render pipelines used for copy/transfer passes.
* Every texture format requires a unique pipeline.
*
* @type {Object<String,GPURenderPipeline>}
*/
this.transferPipelines = {};
/**
* A cache for GPU render pipelines used for flipY passes.
* Every texture format requires a unique pipeline.
*
* @type {Object<String,GPURenderPipeline>}
*/
this.flipYPipelines = {};
/**
* The mipmap vertex shader module.
*
* @type {GPUShaderModule}
*/
this.mipmapVertexShaderModule = device.createShaderModule( {
label: 'mipmapVertex',
code: mipmapVertexSource
} );
/**
* The mipmap fragment shader module.
*
* @type {GPUShaderModule}
*/
this.mipmapFragmentShaderModule = device.createShaderModule( {
label: 'mipmapFragment',
code: mipmapFragmentSource
} );
/**
* The flipY fragment shader module.
*
* @type {GPUShaderModule}
*/
this.flipYFragmentShaderModule = device.createShaderModule( {
label: 'flipYFragment',
code: flipYFragmentSource
} );
}
/**
* Returns a render pipeline for the internal copy render pass. The pass
* requires a unique render pipeline for each texture format.
*
* @param {String} format - The GPU texture format
* @return {GPURenderPipeline} The GPU render pipeline.
*/
getTransferPipeline( format ) {
let pipeline = this.transferPipelines[ format ];
// lazily create and cache one pipeline per color format
if ( pipeline === undefined ) {
pipeline = this.device.createRenderPipeline( {
label: `mipmap-${ format }`,
vertex: {
module: this.mipmapVertexShaderModule,
entryPoint: 'main'
},
fragment: {
module: this.mipmapFragmentShaderModule,
entryPoint: 'main',
targets: [ { format } ]
},
primitive: {
topology: GPUPrimitiveTopology.TriangleStrip,
stripIndexFormat: GPUIndexFormat.Uint32
},
layout: 'auto'
} );
this.transferPipelines[ format ] = pipeline;
}
return pipeline;
}
/**
* Returns a render pipeline for the flipY render pass. The pass
* requires a unique render pipeline for each texture format.
*
* @param {String} format - The GPU texture format
* @return {GPURenderPipeline} The GPU render pipeline.
*/
getFlipYPipeline( format ) {
let pipeline = this.flipYPipelines[ format ];
// lazily create and cache one pipeline per color format
if ( pipeline === undefined ) {
pipeline = this.device.createRenderPipeline( {
label: `flipY-${ format }`,
vertex: {
module: this.mipmapVertexShaderModule,
entryPoint: 'main'
},
fragment: {
module: this.flipYFragmentShaderModule,
entryPoint: 'main',
targets: [ { format } ]
},
primitive: {
topology: GPUPrimitiveTopology.TriangleStrip,
stripIndexFormat: GPUIndexFormat.Uint32
},
layout: 'auto'
} );
this.flipYPipelines[ format ] = pipeline;
}
return pipeline;
}
/**
* Flip the contents of the given GPU texture along its vertical axis.
*
* Implemented as two fullscreen passes: the texture is first copied into a
* temporary texture, then rendered back into itself with inverted v.
*
* @param {GPUTexture} textureGPU - The GPU texture object.
* @param {Object} textureGPUDescriptor - The texture descriptor.
* @param {Number} [baseArrayLayer=0] - The index of the first array layer accessible to the texture view.
*/
flipY( textureGPU, textureGPUDescriptor, baseArrayLayer = 0 ) {
const format = textureGPUDescriptor.format;
const { width, height } = textureGPUDescriptor.size;
const transferPipeline = this.getTransferPipeline( format );
const flipYPipeline = this.getFlipYPipeline( format );
const tempTexture = this.device.createTexture( {
size: { width, height, depthOrArrayLayers: 1 },
format,
usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING
} );
const srcView = textureGPU.createView( {
baseMipLevel: 0,
mipLevelCount: 1,
dimension: GPUTextureViewDimension.TwoD,
baseArrayLayer
} );
const dstView = tempTexture.createView( {
baseMipLevel: 0,
mipLevelCount: 1,
dimension: GPUTextureViewDimension.TwoD,
baseArrayLayer: 0
} );
const commandEncoder = this.device.createCommandEncoder( {} );
// helper: records one fullscreen-quad pass that samples sourceView into destinationView
const pass = ( pipeline, sourceView, destinationView ) => {
const bindGroupLayout = pipeline.getBindGroupLayout( 0 ); // @TODO: Consider making this static.
const bindGroup = this.device.createBindGroup( {
layout: bindGroupLayout,
entries: [ {
binding: 0,
resource: this.flipYSampler
}, {
binding: 1,
resource: sourceView
} ]
} );
const passEncoder = commandEncoder.beginRenderPass( {
colorAttachments: [ {
view: destinationView,
loadOp: GPULoadOp.Clear,
storeOp: GPUStoreOp.Store,
clearValue: [ 0, 0, 0, 0 ]
} ]
} );
passEncoder.setPipeline( pipeline );
passEncoder.setBindGroup( 0, bindGroup );
passEncoder.draw( 4, 1, 0, 0 );
passEncoder.end();
};
// 1. copy the layer into the temp texture, 2. write it back flipped
pass( transferPipeline, srcView, dstView );
pass( flipYPipeline, dstView, srcView );
this.device.queue.submit( [ commandEncoder.finish() ] );
tempTexture.destroy();
}
/**
* Generates mipmaps for the given GPU texture.
*
* @param {GPUTexture} textureGPU - The GPU texture object.
* @param {Object} textureGPUDescriptor - The texture descriptor.
* @param {Number} [baseArrayLayer=0] - The index of the first array layer accessible to the texture view.
*/
generateMipmaps( textureGPU, textureGPUDescriptor, baseArrayLayer = 0 ) {
const textureData = this.get( textureGPU );
if ( textureData.useCount === undefined ) {
textureData.useCount = 0;
textureData.layers = [];
}
// reuse previously recorded render bundles for this layer when available
const passes = textureData.layers[ baseArrayLayer ] || this._mipmapCreateBundles( textureGPU, textureGPUDescriptor, baseArrayLayer );
const commandEncoder = this.device.createCommandEncoder( {} );
this._mipmapRunBundles( commandEncoder, passes );
this.device.queue.submit( [ commandEncoder.finish() ] );
// cache the bundles only from the second use on, so one-shot textures don't pay the memory cost
if ( textureData.useCount !== 0 ) textureData.layers[ baseArrayLayer ] = passes;
textureData.useCount ++;
}
/**
* Since multiple copy render passes are required to generate mipmaps, the passes
* are managed as render bundles to improve performance.
*
* @param {GPUTexture} textureGPU - The GPU texture object.
* @param {Object} textureGPUDescriptor - The texture descriptor.
* @param {Number} baseArrayLayer - The index of the first array layer accessible to the texture view.
* @return {Array} An array of render bundles.
*/
_mipmapCreateBundles( textureGPU, textureGPUDescriptor, baseArrayLayer ) {
const pipeline = this.getTransferPipeline( textureGPUDescriptor.format );
const bindGroupLayout = pipeline.getBindGroupLayout( 0 ); // @TODO: Consider making this static.
let srcView = textureGPU.createView( {
baseMipLevel: 0,
mipLevelCount: 1,
dimension: GPUTextureViewDimension.TwoD,
baseArrayLayer
} );
const passes = [];
// one downsampling pass per mip level, each reading the previously written level
for ( let i = 1; i < textureGPUDescriptor.mipLevelCount; i ++ ) {
const bindGroup = this.device.createBindGroup( {
layout: bindGroupLayout,
entries: [ {
binding: 0,
resource: this.mipmapSampler
}, {
binding: 1,
resource: srcView
} ]
} );
const dstView = textureGPU.createView( {
baseMipLevel: i,
mipLevelCount: 1,
dimension: GPUTextureViewDimension.TwoD,
baseArrayLayer
} );
const passDescriptor = {
colorAttachments: [ {
view: dstView,
loadOp: GPULoadOp.Clear,
storeOp: GPUStoreOp.Store,
clearValue: [ 0, 0, 0, 0 ]
} ]
};
const passEncoder = this.device.createRenderBundleEncoder( {
colorFormats: [ textureGPUDescriptor.format ]
} );
passEncoder.setPipeline( pipeline );
passEncoder.setBindGroup( 0, bindGroup );
passEncoder.draw( 4, 1, 0, 0 );
passes.push( {
renderBundles: [ passEncoder.finish() ],
passDescriptor
} );
// the level just written becomes the source of the next pass
srcView = dstView;
}
return passes;
}
/**
* Executes the render bundles.
*
* @param {GPUCommandEncoder} commandEncoder - The GPU command encoder.
* @param {Array} passes - An array of render bundles.
*/
_mipmapRunBundles( commandEncoder, passes ) {
const levels = passes.length;
for ( let i = 0; i < levels; i ++ ) {
const pass = passes[ i ];
const passEncoder = commandEncoder.beginRenderPass( pass.passDescriptor );
passEncoder.executeBundles( pass.renderBundles );
passEncoder.end();
}
}
}
// Maps three.js compare constants onto WebGPU compare-function identifiers
// (assigned to a sampler's `compare` property for depth textures, see createSampler()).
const _compareToWebGPU = {
[ NeverCompare ]: 'never',
[ LessCompare ]: 'less',
[ EqualCompare ]: 'equal',
[ LessEqualCompare ]: 'less-equal',
[ GreaterCompare ]: 'greater',
[ GreaterEqualCompare ]: 'greater-equal',
[ AlwaysCompare ]: 'always',
[ NotEqualCompare ]: 'not-equal'
};
// Cube layer remap used when uploading flipped cube maps: swaps layers 2 and 3
// (used by _copyCubeMapToTexture when flipY is true).
const _flipMap = [ 0, 1, 3, 2, 4, 5 ];
/**
* A WebGPU backend utility module for managing textures.
*
* @private
*/
class WebGPUTextureUtils {
/**
* Constructs a new utility object.
*
* @param {WebGPUBackend} backend - The WebGPU backend.
*/
constructor( backend ) {
/**
* A reference to the WebGPU backend.
*
* @type {WebGPUBackend}
*/
this.backend = backend;
/**
* A reference to the pass utils, created lazily in _getPassUtils().
*
* @type {WebGPUTexturePassUtils?}
* @default null
*/
this._passUtils = null;
/**
* A dictionary for managing default textures. The key
* is the texture format, the value the texture object.
*
* @type {Object<String,Texture>}
*/
this.defaultTexture = {};
/**
* A dictionary for managing default cube textures. The key
* is the texture format, the value the texture object.
*
* @type {Object<String,CubeTexture>}
*/
this.defaultCubeTexture = {};
/**
* A default video frame.
*
* @type {VideoFrame?}
* @default null
*/
this.defaultVideoFrame = null;
/**
* Represents the color attachment of the default framebuffer.
*
* @type {GPUTexture?}
* @default null
*/
this.colorBuffer = null;
/**
* Represents the depth attachment of the default framebuffer.
* A single shared instance, (re)sized and (re)created on demand in getDepthBuffer().
*
* @type {DepthTexture}
*/
this.depthTexture = new DepthTexture();
this.depthTexture.name = 'depthBuffer';
}
/**
* Creates a GPU sampler for the given texture.
*
* @param {Texture} texture - The texture to create the sampler for.
*/
createSampler( texture ) {
const backend = this.backend;
const device = backend.device;
const textureGPU = backend.get( texture );
const samplerDescriptorGPU = {
addressModeU: this._convertAddressMode( texture.wrapS ),
addressModeV: this._convertAddressMode( texture.wrapT ),
addressModeW: this._convertAddressMode( texture.wrapR ),
magFilter: this._convertFilterMode( texture.magFilter ),
minFilter: this._convertFilterMode( texture.minFilter ),
mipmapFilter: this._convertFilterMode( texture.minFilter ),
maxAnisotropy: 1
};
// anisotropy can only be used when all filter modes are set to linear.
if ( samplerDescriptorGPU.magFilter === GPUFilterMode.Linear && samplerDescriptorGPU.minFilter === GPUFilterMode.Linear && samplerDescriptorGPU.mipmapFilter === GPUFilterMode.Linear ) {
samplerDescriptorGPU.maxAnisotropy = texture.anisotropy;
}
if ( texture.isDepthTexture && texture.compareFunction !== null ) {
samplerDescriptorGPU.compare = _compareToWebGPU[ texture.compareFunction ];
}
textureGPU.sampler = device.createSampler( samplerDescriptorGPU );
}
/**
* Creates a default texture for the given texture that can be used
* as a placeholder until the actual texture is ready for usage.
*
* @param {Texture} texture - The texture to create a default texture for.
*/
createDefaultTexture( texture ) {
let textureGPU;
const format = getFormat( texture );
if ( texture.isCubeTexture ) {
textureGPU = this._getDefaultCubeTextureGPU( format );
} else if ( texture.isVideoTexture ) {
this.backend.get( texture ).externalTexture = this._getDefaultVideoFrame();
} else {
textureGPU = this._getDefaultTextureGPU( format );
}
this.backend.get( texture ).texture = textureGPU;
}
/**
* Defines a texture on the GPU for the given texture object.
*
* @param {Texture} texture - The texture.
* @param {Object} [options={}] - Optional configuration parameter.
* @return {undefined}
*/
createTexture( texture, options = {} ) {
const backend = this.backend;
const textureData = backend.get( texture );
if ( textureData.initialized ) {
throw new Error( 'WebGPUTextureUtils: Texture already initialized.' );
}
if ( options.needsMipmaps === undefined ) options.needsMipmaps = false;
if ( options.levels === undefined ) options.levels = 1;
if ( options.depth === undefined ) options.depth = 1;
const { width, height, depth, levels } = options;
// framebuffer textures must match the color format of their render target (or the canvas)
if ( texture.isFramebufferTexture ) {
if ( options.renderTarget ) {
options.format = this.backend.utils.getCurrentColorFormat( options.renderTarget );
} else {
options.format = this.backend.utils.getPreferredCanvasFormat();
}
}
const dimension = this._getDimension( texture );
// precedence: explicit internal format > caller-provided format > derived from texture settings
const format = texture.internalFormat || options.format || getFormat( texture, backend.device );
textureData.format = format;
const { samples, primarySamples, isMSAA } = backend.utils.getTextureSampleData( texture );
// every texture can be sampled and act as a copy source/destination
let usage = GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC;
if ( texture.isStorageTexture === true ) {
usage |= GPUTextureUsage.STORAGE_BINDING;
}
// compressed formats cannot be used as render attachments
if ( texture.isCompressedTexture !== true && texture.isCompressedArrayTexture !== true ) {
usage |= GPUTextureUsage.RENDER_ATTACHMENT;
}
const textureDescriptorGPU = {
label: texture.name,
size: {
width: width,
height: height,
depthOrArrayLayers: depth,
},
mipLevelCount: levels,
sampleCount: primarySamples,
dimension: dimension,
format: format,
usage: usage
};
// texture creation
if ( texture.isVideoTexture ) {
// video textures are imported as external textures; a temporary VideoFrame
// is created here only to determine the display size
const video = texture.source.data;
const videoFrame = new VideoFrame( video );
textureDescriptorGPU.size.width = videoFrame.displayWidth;
textureDescriptorGPU.size.height = videoFrame.displayHeight;
videoFrame.close();
textureData.externalTexture = video;
} else {
if ( format === undefined ) {
console.warn( 'WebGPURenderer: Texture format not supported.' );
// fall back to a placeholder so rendering can continue
return this.createDefaultTexture( texture );
}
textureData.texture = backend.device.createTexture( textureDescriptorGPU );
}
if ( isMSAA ) {
// MSAA requires an additional texture carrying the full sample count
const msaaTextureDescriptorGPU = Object.assign( {}, textureDescriptorGPU );
msaaTextureDescriptorGPU.label = msaaTextureDescriptorGPU.label + '-msaa';
msaaTextureDescriptorGPU.sampleCount = samples;
textureData.msaaTexture = backend.device.createTexture( msaaTextureDescriptorGPU );
}
textureData.initialized = true;
textureData.textureDescriptorGPU = textureDescriptorGPU;
}
/**
* Destroys the GPU data for the given texture object.
*
* @param {Texture} texture - The texture.
*/
destroyTexture( texture ) {
const backend = this.backend;
const textureData = backend.get( texture );
if ( textureData.texture !== undefined ) textureData.texture.destroy();
if ( textureData.msaaTexture !== undefined ) textureData.msaaTexture.destroy();
backend.delete( texture );
}
/**
* Destroys the GPU sampler for the given texture.
*
* @param {Texture} texture - The texture to destroy the sampler for.
*/
destroySampler( texture ) {
const backend = this.backend;
const textureData = backend.get( texture );
delete textureData.sampler;
}
/**
* Generates mipmaps for the given texture.
*
* @param {Texture} texture - The texture.
*/
generateMipmaps( texture ) {
const textureData = this.backend.get( texture );
if ( texture.isCubeTexture ) {
for ( let i = 0; i < 6; i ++ ) {
this._generateMipmaps( textureData.texture, textureData.textureDescriptorGPU, i );
}
} else {
const depth = texture.image.depth || 1;
for ( let i = 0; i < depth; i ++ ) {
this._generateMipmaps( textureData.texture, textureData.textureDescriptorGPU, i );
}
}
}
/**
* Returns the color buffer representing the color
* attachment of the default framebuffer.
*
* @return {GPUTexture} The color buffer.
*/
getColorBuffer() {
if ( this.colorBuffer ) this.colorBuffer.destroy();
const backend = this.backend;
const { width, height } = backend.getDrawingBufferSize();
this.colorBuffer = backend.device.createTexture( {
label: 'colorBuffer',
size: {
width: width,
height: height,
depthOrArrayLayers: 1
},
sampleCount: backend.utils.getSampleCount( backend.renderer.samples ),
format: backend.utils.getPreferredCanvasFormat(),
usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC
} );
return this.colorBuffer;
}
/**
* Returns the depth buffer representing the depth
* attachment of the default framebuffer.
*
* @param {Boolean} [depth=true] - Whether depth is enabled or not.
* @param {Boolean} [stencil=false] - Whether stencil is enabled or not.
* @return {GPUTexture} The depth buffer.
*/
getDepthBuffer( depth = true, stencil = false ) {
const backend = this.backend;
const { width, height } = backend.getDrawingBufferSize();
const depthTexture = this.depthTexture;
const depthTextureGPU = backend.get( depthTexture ).texture;
let format, type;
if ( stencil ) {
format = DepthStencilFormat;
type = UnsignedInt248Type;
} else if ( depth ) {
format = DepthFormat;
type = UnsignedIntType;
}
// NOTE(review): when both depth and stencil are false, format/type remain
// undefined and the texture below is configured without them — presumably
// callers never request a depth buffer in that state; confirm.
if ( depthTextureGPU !== undefined ) {
// reuse the existing GPU texture as long as size, format and type are unchanged
if ( depthTexture.image.width === width && depthTexture.image.height === height && depthTexture.format === format && depthTexture.type === type ) {
return depthTextureGPU;
}
this.destroyTexture( depthTexture );
}
depthTexture.name = 'depthBuffer';
depthTexture.format = format;
depthTexture.type = type;
depthTexture.image.width = width;
depthTexture.image.height = height;
this.createTexture( depthTexture, { width, height } );
return backend.get( depthTexture ).texture;
}
/**
* Uploads the updated texture data to the GPU.
*
* @param {Texture} texture - The texture.
* @param {Object} [options={}] - Optional configuration parameter.
*/
updateTexture( texture, options ) {
const textureData = this.backend.get( texture );
const { textureDescriptorGPU } = textureData;
if ( texture.isRenderTargetTexture || ( textureDescriptorGPU === undefined /* unsupported texture format */ ) )
return;
// transfer texture data
if ( texture.isDataTexture ) {
this._copyBufferToTexture( options.image, textureData.texture, textureDescriptorGPU, 0, texture.flipY );
} else if ( texture.isDataArrayTexture || texture.isData3DTexture ) {
for ( let i = 0; i < options.image.depth; i ++ ) {
this._copyBufferToTexture( options.image, textureData.texture, textureDescriptorGPU, i, texture.flipY, i );
}
} else if ( texture.isCompressedTexture || texture.isCompressedArrayTexture ) {
this._copyCompressedBufferToTexture( texture.mipmaps, textureData.texture, textureDescriptorGPU );
} else if ( texture.isCubeTexture ) {
this._copyCubeMapToTexture( options.images, textureData.texture, textureDescriptorGPU, texture.flipY );
} else if ( texture.isVideoTexture ) {
const video = texture.source.data;
textureData.externalTexture = video;
} else {
this._copyImageToTexture( options.image, textureData.texture, textureDescriptorGPU, 0, texture.flipY );
}
//
textureData.version = texture.version;
if ( texture.onUpdate ) texture.onUpdate( texture );
}
/**
* Returns texture data as a typed array.
*
* @async
* @param {Texture} texture - The texture to copy.
* @param {Number} x - The x coordinate of the copy origin.
* @param {Number} y - The y coordinate of the copy origin.
* @param {Number} width - The width of the copy.
* @param {Number} height - The height of the copy.
* @param {Number} faceIndex - The face index.
* @return {Promise<TypedArray>} A Promise that resolves with a typed array when the copy operation has finished.
*/
async copyTextureToBuffer( texture, x, y, width, height, faceIndex ) {
const device = this.backend.device;
const textureData = this.backend.get( texture );
const textureGPU = textureData.texture;
const format = textureData.textureDescriptorGPU.format;
const bytesPerTexel = this._getBytesPerTexel( format );
let bytesPerRow = width * bytesPerTexel;
bytesPerRow = Math.ceil( bytesPerRow / 256 ) * 256; // Align to 256 bytes
const readBuffer = device.createBuffer(
{
size: width * height * bytesPerTexel,
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
}
);
const encoder = device.createCommandEncoder();
encoder.copyTextureToBuffer(
{
texture: textureGPU,
origin: { x, y, z: faceIndex },
},
{
buffer: readBuffer,
bytesPerRow: bytesPerRow
},
{
width: width,
height: height
}
);
const typedArrayType = this._getTypedArrayType( format );
device.queue.submit( [ encoder.finish() ] );
await readBuffer.mapAsync( GPUMapMode.READ );
const buffer = readBuffer.getMappedRange();
return new typedArrayType( buffer );
}
/**
* Returns `true` if the given texture is an environment map.
*
* @private
* @param {Texture} texture - The texture.
* @return {Boolean} Whether the given texture is an environment map or not.
*/
_isEnvironmentTexture( texture ) {
const mapping = texture.mapping;
return ( mapping === EquirectangularReflectionMapping || mapping === EquirectangularRefractionMapping ) || ( mapping === CubeReflectionMapping || mapping === CubeRefractionMapping );
}
/**
* Returns the default GPU texture for the given format.
*
* @private
* @param {String} format - The GPU format.
* @return {GPUTexture} The GPU texture.
*/
_getDefaultTextureGPU( format ) {
let defaultTexture = this.defaultTexture[ format ];
if ( defaultTexture === undefined ) {
const texture = new Texture();
texture.minFilter = NearestFilter;
texture.magFilter = NearestFilter;
this.createTexture( texture, { width: 1, height: 1, format } );
this.defaultTexture[ format ] = defaultTexture = texture;
}
return this.backend.get( defaultTexture ).texture;
}
/**
* Returns the default GPU cube texture for the given format.
*
* @private
* @param {String} format - The GPU format.
* @return {GPUTexture} The GPU texture.
*/
_getDefaultCubeTextureGPU( format ) {
let defaultCubeTexture = this.defaultTexture[ format ];
if ( defaultCubeTexture === undefined ) {
const texture = new CubeTexture();
texture.minFilter = NearestFilter;
texture.magFilter = NearestFilter;
this.createTexture( texture, { width: 1, height: 1, depth: 6 } );
this.defaultCubeTexture[ format ] = defaultCubeTexture = texture;
}
return this.backend.get( defaultCubeTexture ).texture;
}
/**
* Returns the default video frame used as default data in context of video textures.
*
* @private
* @return {VideoFrame} The video frame.
*/
_getDefaultVideoFrame() {
let defaultVideoFrame = this.defaultVideoFrame;
if ( defaultVideoFrame === null ) {
const init = {
timestamp: 0,
codedWidth: 1,
codedHeight: 1,
format: 'RGBA',
};
this.defaultVideoFrame = defaultVideoFrame = new VideoFrame( new Uint8Array( [ 0, 0, 0, 0xff ] ), init );
}
return defaultVideoFrame;
}
/**
* Uploads cube texture image data to the GPU memory.
*
* @private
* @param {Array} images - The cube image data.
* @param {GPUTexture} textureGPU - The GPU texture.
* @param {Object} textureDescriptorGPU - The GPU texture descriptor.
* @param {Boolean} flipY - Whether to flip texture data along their vertical axis or not.
*/
_copyCubeMapToTexture( images, textureGPU, textureDescriptorGPU, flipY ) {
for ( let i = 0; i < 6; i ++ ) {
const image = images[ i ];
const flipIndex = flipY === true ? _flipMap[ i ] : i;
if ( image.isDataTexture ) {
this._copyBufferToTexture( image.image, textureGPU, textureDescriptorGPU, flipIndex, flipY );
} else {
this._copyImageToTexture( image, textureGPU, textureDescriptorGPU, flipIndex, flipY );
}
}
}
/**
* Uploads texture image data to the GPU memory.
*
* @private
* @param {HTMLImageElement|ImageBitmap|HTMLCanvasElement} image - The image data.
* @param {GPUTexture} textureGPU - The GPU texture.
* @param {Object} textureDescriptorGPU - The GPU texture descriptor.
* @param {Number} originDepth - The origin depth.
* @param {Boolean} flipY - Whether to flip texture data along their vertical axis or not.
*/
_copyImageToTexture( image, textureGPU, textureDescriptorGPU, originDepth, flipY ) {
const device = this.backend.device;
device.queue.copyExternalImageToTexture(
{
source: image,
flipY: flipY
}, {
texture: textureGPU,
mipLevel: 0,
origin: { x: 0, y: 0, z: originDepth }
}, {
width: image.width,
height: image.height,
depthOrArrayLayers: 1
}
);
}
/**
* Returns the pass utils singleton.
*
* @private
* @return {WebGPUTexturePassUtils} The utils instance.
*/
_getPassUtils() {
let passUtils = this._passUtils;
if ( passUtils === null ) {
this._passUtils = passUtils = new WebGPUTexturePassUtils( this.backend.device );
}
return passUtils;
}
/**
* Generates mipmaps for the given GPU texture.
*
* Delegates to the lazily created pass utils.
*
* @private
* @param {GPUTexture} textureGPU - The GPU texture object.
* @param {Object} textureDescriptorGPU - The texture descriptor.
* @param {Number} [baseArrayLayer=0] - The index of the first array layer accessible to the texture view.
*/
_generateMipmaps( textureGPU, textureDescriptorGPU, baseArrayLayer = 0 ) {
this._getPassUtils().generateMipmaps( textureGPU, textureDescriptorGPU, baseArrayLayer );
}
/**
* Flip the contents of the given GPU texture along its vertical axis.
*
* Delegates to the lazily created pass utils.
*
* @private
* @param {GPUTexture} textureGPU - The GPU texture object.
* @param {Object} textureDescriptorGPU - The texture descriptor.
* @param {Number} [originDepth=0] - The origin depth.
*/
_flipY( textureGPU, textureDescriptorGPU, originDepth = 0 ) {
this._getPassUtils().flipY( textureGPU, textureDescriptorGPU, originDepth );
}
/**
* Uploads texture buffer data to the GPU memory.
*
* @private
* @param {Object} image - An object defining the image buffer data.
* @param {GPUTexture} textureGPU - The GPU texture.
* @param {Object} textureDescriptorGPU - The GPU texture descriptor.
* @param {Number} originDepth - The origin depth.
* @param {Boolean} flipY - Whether to flip texture data along their vertical axis or not.
* @param {Number} [depth=0] - Index of the slice in the source data; used to compute the byte offset of the layer that gets uploaded.
*/
_copyBufferToTexture( image, textureGPU, textureDescriptorGPU, originDepth, flipY, depth = 0 ) {
// @TODO: Consider to use GPUCommandEncoder.copyBufferToTexture()
// @TODO: Consider to support valid buffer layouts with other formats like RGB
const device = this.backend.device;
const data = image.data;
const bytesPerTexel = this._getBytesPerTexel( textureDescriptorGPU.format );
const bytesPerRow = image.width * bytesPerTexel;
device.queue.writeTexture(
{
texture: textureGPU,
mipLevel: 0,
origin: { x: 0, y: 0, z: originDepth }
},
data,
{
// skip `depth` full slices in the source data
offset: image.width * image.height * bytesPerTexel * depth,
bytesPerRow
},
{
width: image.width,
height: image.height,
depthOrArrayLayers: 1
} );
if ( flipY === true ) {
// flip on the GPU after the upload instead of reordering the source data
this._flipY( textureGPU, textureDescriptorGPU, originDepth );
}
}
/**
* Uploads compressed texture data to the GPU memory.
*
* @private
* @param {Array<Object>} mipmaps - An array with mipmap data.
* @param {GPUTexture} textureGPU - The GPU texture.
* @param {Object} textureDescriptorGPU - The GPU texture descriptor.
*/
_copyCompressedBufferToTexture( mipmaps, textureGPU, textureDescriptorGPU ) {
// @TODO: Consider to use GPUCommandEncoder.copyBufferToTexture()
const device = this.backend.device;
const blockData = this._getBlockData( textureDescriptorGPU.format );
const isTextureArray = textureDescriptorGPU.size.depthOrArrayLayers > 1;
for ( let i = 0; i < mipmaps.length; i ++ ) {
const mipmap = mipmaps[ i ];
const width = mipmap.width;
const height = mipmap.height;
const depth = isTextureArray ? textureDescriptorGPU.size.depthOrArrayLayers : 1;
// row/image pitch in whole compression blocks
const bytesPerRow = Math.ceil( width / blockData.width ) * blockData.byteLength;
const bytesPerImage = bytesPerRow * Math.ceil( height / blockData.height );
// texture arrays store one image per layer inside the same mipmap data buffer
for ( let j = 0; j < depth; j ++ ) {
device.queue.writeTexture(
{
texture: textureGPU,
mipLevel: i,
origin: { x: 0, y: 0, z: j }
},
mipmap.data,
{
offset: j * bytesPerImage,
bytesPerRow,
rowsPerImage: Math.ceil( height / blockData.height )
},
{
// copy extents are rounded up to whole blocks
width: Math.ceil( width / blockData.width ) * blockData.width,
height: Math.ceil( height / blockData.height ) * blockData.height,
depthOrArrayLayers: 1
}
);
}
}
}
/**
* This method is only relevant for compressed texture formats. It returns a block
* data descriptor for the given GPU compressed texture format.
*
* @private
* @param {String} format - The GPU compressed texture format.
* @return {Object|undefined} The block data descriptor (`byteLength` per block plus block
* `width`/`height` in texels), or `undefined` for formats not listed here.
*/
_getBlockData( format ) {
if ( format === GPUTextureFormat.BC1RGBAUnorm || format === GPUTextureFormat.BC1RGBAUnormSRGB ) return { byteLength: 8, width: 4, height: 4 }; // DXT1
if ( format === GPUTextureFormat.BC2RGBAUnorm || format === GPUTextureFormat.BC2RGBAUnormSRGB ) return { byteLength: 16, width: 4, height: 4 }; // DXT3
if ( format === GPUTextureFormat.BC3RGBAUnorm || format === GPUTextureFormat.BC3RGBAUnormSRGB ) return { byteLength: 16, width: 4, height: 4 }; // DXT5
if ( format === GPUTextureFormat.BC4RUnorm || format === GPUTextureFormat.BC4RSNorm ) return { byteLength: 8, width: 4, height: 4 }; // RGTC1
if ( format === GPUTextureFormat.BC5RGUnorm || format === GPUTextureFormat.BC5RGSnorm ) return { byteLength: 16, width: 4, height: 4 }; // RGTC2
if ( format === GPUTextureFormat.BC6HRGBUFloat || format === GPUTextureFormat.BC6HRGBFloat ) return { byteLength: 16, width: 4, height: 4 }; // BPTC (float)
if ( format === GPUTextureFormat.BC7RGBAUnorm || format === GPUTextureFormat.BC7RGBAUnormSRGB ) return { byteLength: 16, width: 4, height: 4 }; // BPTC (unorm)
if ( format === GPUTextureFormat.ETC2RGB8Unorm || format === GPUTextureFormat.ETC2RGB8UnormSRGB ) return { byteLength: 8, width: 4, height: 4 };
if ( format === GPUTextureFormat.ETC2RGB8A1Unorm || format === GPUTextureFormat.ETC2RGB8A1UnormSRGB ) return { byteLength: 8, width: 4, height: 4 };
if ( format === GPUTextureFormat.ETC2RGBA8Unorm || format === GPUTextureFormat.ETC2RGBA8UnormSRGB ) return { byteLength: 16, width: 4, height: 4 };
if ( format === GPUTextureFormat.EACR11Unorm ) return { byteLength: 8, width: 4, height: 4 };
if ( format === GPUTextureFormat.EACR11Snorm ) return { byteLength: 8, width: 4, height: 4 };
if ( format === GPUTextureFormat.EACRG11Unorm ) return { byteLength: 16, width: 4, height: 4 };
if ( format === GPUTextureFormat.EACRG11Snorm ) return { byteLength: 16, width: 4, height: 4 };
// ASTC formats encode their block dimensions in the format name
if ( format === GPUTextureFormat.ASTC4x4Unorm || format === GPUTextureFormat.ASTC4x4UnormSRGB ) return { byteLength: 16, width: 4, height: 4 };
if ( format === GPUTextureFormat.ASTC5x4Unorm || format === GPUTextureFormat.ASTC5x4UnormSRGB ) return { byteLength: 16, width: 5, height: 4 };
if ( format === GPUTextureFormat.ASTC5x5Unorm || format === GPUTextureFormat.ASTC5x5UnormSRGB ) return { byteLength: 16, width: 5, height: 5 };
if ( format === GPUTextureFormat.ASTC6x5Unorm || format === GPUTextureFormat.ASTC6x5UnormSRGB ) return { byteLength: 16, width: 6, height: 5 };
if ( format === GPUTextureFormat.ASTC6x6Unorm || format === GPUTextureFormat.ASTC6x6UnormSRGB ) return { byteLength: 16, width: 6, height: 6 };
if ( format === GPUTextureFormat.ASTC8x5Unorm || format === GPUTextureFormat.ASTC8x5UnormSRGB ) return { byteLength: 16, width: 8, height: 5 };
if ( format === GPUTextureFormat.ASTC8x6Unorm || format === GPUTextureFormat.ASTC8x6UnormSRGB ) return { byteLength: 16, width: 8, height: 6 };
if ( format === GPUTextureFormat.ASTC8x8Unorm || format === GPUTextureFormat.ASTC8x8UnormSRGB ) return { byteLength: 16, width: 8, height: 8 };
if ( format === GPUTextureFormat.ASTC10x5Unorm || format === GPUTextureFormat.ASTC10x5UnormSRGB ) return { byteLength: 16, width: 10, height: 5 };
if ( format === GPUTextureFormat.ASTC10x6Unorm || format === GPUTextureFormat.ASTC10x6UnormSRGB ) return { byteLength: 16, width: 10, height: 6 };
if ( format === GPUTextureFormat.ASTC10x8Unorm || format === GPUTextureFormat.ASTC10x8UnormSRGB ) return { byteLength: 16, width: 10, height: 8 };
if ( format === GPUTextureFormat.ASTC10x10Unorm || format === GPUTextureFormat.ASTC10x10UnormSRGB ) return { byteLength: 16, width: 10, height: 10 };
if ( format === GPUTextureFormat.ASTC12x10Unorm || format === GPUTextureFormat.ASTC12x10UnormSRGB ) return { byteLength: 16, width: 12, height: 10 };
if ( format === GPUTextureFormat.ASTC12x12Unorm || format === GPUTextureFormat.ASTC12x12UnormSRGB ) return { byteLength: 16, width: 12, height: 12 };
}
/**
* Converts the three.js uv wrapping constants to GPU address mode constants.
*
* @private
* @param {Number} value - The three.js constant defining a uv wrapping mode.
* @return {String} The GPU address mode.
*/
_convertAddressMode( value ) {
let addressMode = GPUAddressMode.ClampToEdge;
if ( value === RepeatWrapping ) {
addressMode = GPUAddressMode.Repeat;
} else if ( value === MirroredRepeatWrapping ) {
addressMode = GPUAddressMode.MirrorRepeat;
}
return addressMode;
}
/**
* Converts the three.js filter constants to GPU filter constants.
*
* @private
* @param {Number} value - The three.js constant defining a filter mode.
* @return {String} The GPU filter mode.
*/
_convertFilterMode( value ) {
let filterMode = GPUFilterMode.Linear;
if ( value === NearestFilter || value === NearestMipmapNearestFilter || value === NearestMipmapLinearFilter ) {
filterMode = GPUFilterMode.Nearest;
}
return filterMode;
}
/**
* Returns the bytes-per-texel value for the given GPU texture format.
*
* @private
* @param {String} format - The GPU texture format.
* @return {Number} The bytes-per-texel.
*/
_getBytesPerTexel( format ) {
// 8-bit formats
if ( format === GPUTextureFormat.R8Unorm ||
format === GPUTextureFormat.R8Snorm ||
format === GPUTextureFormat.R8Uint ||
format === GPUTextureFormat.R8Sint ) return 1;
// 16-bit formats
if ( format === GPUTextureFormat.R16Uint ||
format === GPUTextureFormat.R16Sint ||
format === GPUTextureFormat.R16Float ||
format === GPUTextureFormat.RG8Unorm ||
format === GPUTextureFormat.RG8Snorm ||
format === GPUTextureFormat.RG8Uint ||
format === GPUTextureFormat.RG8Sint ) return 2;
// 32-bit formats
if ( format === GPUTextureFormat.R32Uint ||
format === GPUTextureFormat.R32Sint ||
format === GPUTextureFormat.R32Float ||
format === GPUTextureFormat.RG16Uint ||
format === GPUTextureFormat.RG16Sint ||
format === GPUTextureFormat.RG16Float ||
format === GPUTextureFormat.RGBA8Unorm ||
format === GPUTextureFormat.RGBA8UnormSRGB ||
format === GPUTextureFormat.RGBA8Snorm ||
format === GPUTextureFormat.RGBA8Uint ||
format === GPUTextureFormat.RGBA8Sint ||
format === GPUTextureFormat.BGRA8Unorm ||
format === GPUTextureFormat.BGRA8UnormSRGB ||
// Packed 32-bit formats
format === GPUTextureFormat.RGB9E5UFloat ||
format === GPUTextureFormat.RGB10A2Unorm ||
format === GPUTextureFormat.RG11B10UFloat ||
format === GPUTextureFormat.Depth32Float ||
format === GPUTextureFormat.Depth24Plus ||
format === GPUTextureFormat.Depth24PlusStencil8 ||
format === GPUTextureFormat.Depth32FloatStencil8 ) return 4;
// 64-bit formats
if ( format === GPUTextureFormat.RG32Uint ||
format === GPUTextureFormat.RG32Sint ||
format === GPUTextureFormat.RG32Float ||
format === GPUTextureFormat.RGBA16Uint ||
format === GPUTextureFormat.RGBA16Sint ||
format === GPUTextureFormat.RGBA16Float ) return 8;
// 128-bit formats
if ( format === GPUTextureFormat.RGBA32Uint ||
format === GPUTextureFormat.RGBA32Sint ||
format === GPUTextureFormat.RGBA32Float ) return 16;
}
/**
* Returns the corresponding typed array type for the given GPU texture format.
*
* @private
* @param {String} format - The GPU texture format.
* @return {TypedArray.constructor} The typed array type.
*/
_getTypedArrayType( format ) {
if ( format === GPUTextureFormat.R8Uint ) return Uint8Array;
if ( format === GPUTextureFormat.R8Sint ) return Int8Array;
if ( format === GPUTextureFormat.R8Unorm ) return Uint8Array;
if ( format === GPUTextureFormat.R8Snorm ) return Int8Array;
if ( format === GPUTextureFormat.RG8Uint ) return Uint8Array;
if ( format === GPUTextureFormat.RG8Sint ) return Int8Array;
if ( format === GPUTextureFormat.RG8Unorm ) return Uint8Array;
if ( format === GPUTextureFormat.RG8Snorm ) return Int8Array;
if ( format === GPUTextureFormat.RGBA8Uint ) return Uint8Array;
if ( format === GPUTextureFormat.RGBA8Sint ) return Int8Array;
if ( format === GPUTextureFormat.RGBA8Unorm ) return Uint8Array;
if ( format === GPUTextureFormat.RGBA8Snorm ) return Int8Array;
if ( format === GPUTextureFormat.R16Uint ) return Uint16Array;
if ( format === GPUTextureFormat.R16Sint ) return Int16Array;
if ( format === GPUTextureFormat.RG16Uint ) return Uint16Array;
if ( format === GPUTextureFormat.RG16Sint ) return Int16Array;
if ( format === GPUTextureFormat.RGBA16Uint ) return Uint16Array;
if ( format === GPUTextureFormat.RGBA16Sint ) return Int16Array;
if ( format === GPUTextureFormat.R16Float ) return Uint16Array;
if ( format === GPUTextureFormat.RG16Float ) return Uint16Array;
if ( format === GPUTextureFormat.RGBA16Float ) return Uint16Array;
if ( format === GPUTextureFormat.R32Uint ) return Uint32Array;
if ( format === GPUTextureFormat.R32Sint ) return Int32Array;
if ( format === GPUTextureFormat.R32Float ) return Float32Array;
if ( format === GPUTextureFormat.RG32Uint ) return Uint32Array;
if ( format === GPUTextureFormat.RG32Sint ) return Int32Array;
if ( format === GPUTextureFormat.RG32Float ) return Float32Array;
if ( format === GPUTextureFormat.RGBA32Uint ) return Uint32Array;
if ( format === GPUTextureFormat.RGBA32Sint ) return Int32Array;
if ( format === GPUTextureFormat.RGBA32Float ) return Float32Array;
if ( format === GPUTextureFormat.BGRA8Unorm ) return Uint8Array;
if ( format === GPUTextureFormat.BGRA8UnormSRGB ) return Uint8Array;
if ( format === GPUTextureFormat.RGB10A2Unorm ) return Uint32Array;
if ( format === GPUTextureFormat.RGB9E5UFloat ) return Uint32Array;
if ( format === GPUTextureFormat.RG11B10UFloat ) return Uint32Array;
if ( format === GPUTextureFormat.Depth32Float ) return Float32Array;
if ( format === GPUTextureFormat.Depth24Plus ) return Uint32Array;
if ( format === GPUTextureFormat.Depth24PlusStencil8 ) return Uint32Array;
if ( format === GPUTextureFormat.Depth32FloatStencil8 ) return Float32Array;
}
/**
* Returns the GPU dimensions for the given texture.
*
* @private
* @param {Texture} texture - The texture.
* @return {String} The GPU dimension.
*/
_getDimension( texture ) {
let dimension;
if ( texture.isData3DTexture ) {
dimension = GPUTextureDimension.ThreeD;
} else {
dimension = GPUTextureDimension.TwoD;
}
return dimension;
}
}
/**
* Returns the GPU texture format for the given three.js texture, derived from
* the texture's `format`, `type` and `colorSpace` properties.
*
* @param {Texture} texture - The texture.
* @param {GPUDevice?} [device=null] - The GPU device which is used for feature detection.
* It is not necessary to apply the device for most formats; it is only
* consulted for formats gated behind optional device features
* (currently Depth32FloatStencil8).
* @return {String} The GPU format (a GPUTextureFormat value); `undefined` if
* the format/type combination is unsupported (an error is logged).
*/
function getFormat( texture, device = null ) {
const format = texture.format;
const type = texture.type;
const colorSpace = texture.colorSpace;
let formatGPU;
if ( texture.isCompressedTexture === true || texture.isCompressedArrayTexture === true ) {
// Compressed textures: the GPU format is fully determined by the block
// format constant plus the color space (sRGB variant for SRGBColorSpace).
switch ( format ) {
case RGBA_S3TC_DXT1_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.BC1RGBAUnormSRGB : GPUTextureFormat.BC1RGBAUnorm;
break;
case RGBA_S3TC_DXT3_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.BC2RGBAUnormSRGB : GPUTextureFormat.BC2RGBAUnorm;
break;
case RGBA_S3TC_DXT5_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.BC3RGBAUnormSRGB : GPUTextureFormat.BC3RGBAUnorm;
break;
case RGB_ETC2_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ETC2RGB8UnormSRGB : GPUTextureFormat.ETC2RGB8Unorm;
break;
case RGBA_ETC2_EAC_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ETC2RGBA8UnormSRGB : GPUTextureFormat.ETC2RGBA8Unorm;
break;
case RGBA_ASTC_4x4_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC4x4UnormSRGB : GPUTextureFormat.ASTC4x4Unorm;
break;
case RGBA_ASTC_5x4_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC5x4UnormSRGB : GPUTextureFormat.ASTC5x4Unorm;
break;
case RGBA_ASTC_5x5_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC5x5UnormSRGB : GPUTextureFormat.ASTC5x5Unorm;
break;
case RGBA_ASTC_6x5_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC6x5UnormSRGB : GPUTextureFormat.ASTC6x5Unorm;
break;
case RGBA_ASTC_6x6_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC6x6UnormSRGB : GPUTextureFormat.ASTC6x6Unorm;
break;
case RGBA_ASTC_8x5_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC8x5UnormSRGB : GPUTextureFormat.ASTC8x5Unorm;
break;
case RGBA_ASTC_8x6_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC8x6UnormSRGB : GPUTextureFormat.ASTC8x6Unorm;
break;
case RGBA_ASTC_8x8_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC8x8UnormSRGB : GPUTextureFormat.ASTC8x8Unorm;
break;
case RGBA_ASTC_10x5_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC10x5UnormSRGB : GPUTextureFormat.ASTC10x5Unorm;
break;
case RGBA_ASTC_10x6_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC10x6UnormSRGB : GPUTextureFormat.ASTC10x6Unorm;
break;
case RGBA_ASTC_10x8_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC10x8UnormSRGB : GPUTextureFormat.ASTC10x8Unorm;
break;
case RGBA_ASTC_10x10_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC10x10UnormSRGB : GPUTextureFormat.ASTC10x10Unorm;
break;
case RGBA_ASTC_12x10_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC12x10UnormSRGB : GPUTextureFormat.ASTC12x10Unorm;
break;
case RGBA_ASTC_12x12_Format:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.ASTC12x12UnormSRGB : GPUTextureFormat.ASTC12x12Unorm;
break;
// uncompressed RGBA is accepted here as well (compressed array textures
// may carry uncompressed layers)
case RGBAFormat:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.RGBA8UnormSRGB : GPUTextureFormat.RGBA8Unorm;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture format.', format );
}
} else {
// Uncompressed textures: the GPU format is derived from the combination
// of three.js format and type (plus color space for 8-bit RGBA).
switch ( format ) {
case RGBAFormat:
switch ( type ) {
case ByteType:
formatGPU = GPUTextureFormat.RGBA8Snorm;
break;
case ShortType:
formatGPU = GPUTextureFormat.RGBA16Sint;
break;
case UnsignedShortType:
formatGPU = GPUTextureFormat.RGBA16Uint;
break;
case UnsignedIntType:
formatGPU = GPUTextureFormat.RGBA32Uint;
break;
case IntType:
formatGPU = GPUTextureFormat.RGBA32Sint;
break;
case UnsignedByteType:
formatGPU = ( colorSpace === SRGBColorSpace ) ? GPUTextureFormat.RGBA8UnormSRGB : GPUTextureFormat.RGBA8Unorm;
break;
case HalfFloatType:
formatGPU = GPUTextureFormat.RGBA16Float;
break;
case FloatType:
formatGPU = GPUTextureFormat.RGBA32Float;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with RGBAFormat.', type );
}
break;
case RGBFormat:
switch ( type ) {
case UnsignedInt5999Type:
formatGPU = GPUTextureFormat.RGB9E5UFloat;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with RGBFormat.', type );
}
break;
case RedFormat:
switch ( type ) {
case ByteType:
formatGPU = GPUTextureFormat.R8Snorm;
break;
case ShortType:
formatGPU = GPUTextureFormat.R16Sint;
break;
case UnsignedShortType:
formatGPU = GPUTextureFormat.R16Uint;
break;
case UnsignedIntType:
formatGPU = GPUTextureFormat.R32Uint;
break;
case IntType:
formatGPU = GPUTextureFormat.R32Sint;
break;
case UnsignedByteType:
formatGPU = GPUTextureFormat.R8Unorm;
break;
case HalfFloatType:
formatGPU = GPUTextureFormat.R16Float;
break;
case FloatType:
formatGPU = GPUTextureFormat.R32Float;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with RedFormat.', type );
}
break;
case RGFormat:
switch ( type ) {
case ByteType:
formatGPU = GPUTextureFormat.RG8Snorm;
break;
case ShortType:
formatGPU = GPUTextureFormat.RG16Sint;
break;
case UnsignedShortType:
formatGPU = GPUTextureFormat.RG16Uint;
break;
case UnsignedIntType:
formatGPU = GPUTextureFormat.RG32Uint;
break;
case IntType:
formatGPU = GPUTextureFormat.RG32Sint;
break;
case UnsignedByteType:
formatGPU = GPUTextureFormat.RG8Unorm;
break;
case HalfFloatType:
formatGPU = GPUTextureFormat.RG16Float;
break;
case FloatType:
formatGPU = GPUTextureFormat.RG32Float;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with RGFormat.', type );
}
break;
case DepthFormat:
switch ( type ) {
case UnsignedShortType:
formatGPU = GPUTextureFormat.Depth16Unorm;
break;
case UnsignedIntType:
formatGPU = GPUTextureFormat.Depth24Plus;
break;
case FloatType:
formatGPU = GPUTextureFormat.Depth32Float;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with DepthFormat.', type );
}
break;
case DepthStencilFormat:
switch ( type ) {
case UnsignedInt248Type:
formatGPU = GPUTextureFormat.Depth24PlusStencil8;
break;
case FloatType:
// Depth32FloatStencil8 is an optional WebGPU feature; warn when the
// provided device does not expose it.
if ( device && device.features.has( GPUFeatureName.Depth32FloatStencil8 ) === false ) {
console.error( 'WebGPURenderer: Depth textures with DepthStencilFormat + FloatType can only be used with the "depth32float-stencil8" GPU feature.' );
}
formatGPU = GPUTextureFormat.Depth32FloatStencil8;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with DepthStencilFormat.', type );
}
break;
// integer formats only support the 32-bit integer types
case RedIntegerFormat:
switch ( type ) {
case IntType:
formatGPU = GPUTextureFormat.R32Sint;
break;
case UnsignedIntType:
formatGPU = GPUTextureFormat.R32Uint;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with RedIntegerFormat.', type );
}
break;
case RGIntegerFormat:
switch ( type ) {
case IntType:
formatGPU = GPUTextureFormat.RG32Sint;
break;
case UnsignedIntType:
formatGPU = GPUTextureFormat.RG32Uint;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with RGIntegerFormat.', type );
}
break;
case RGBAIntegerFormat:
switch ( type ) {
case IntType:
formatGPU = GPUTextureFormat.RGBA32Sint;
break;
case UnsignedIntType:
formatGPU = GPUTextureFormat.RGBA32Uint;
break;
default:
console.error( 'WebGPURenderer: Unsupported texture type with RGBAIntegerFormat.', type );
}
break;
default:
console.error( 'WebGPURenderer: Unsupported texture format.', format );
}
}
return formatGPU;
}
// Matches a WGSL function declaration: group 1 is the function name, group 2
// the raw parameter list, group 3 the return type (possibly with <...> args).
const declarationRegexp = /^[fn]*\s*([a-z_0-9]+)?\s*\(([\s\S]*?)\)\s*[\-\>]*\s*([a-z_0-9]+(?:<[\s\S]+?>)?)/i;
// Matches 'name : type' pairs inside a WGSL parameter list. Global flag:
// intended to be used with exec() in a loop until it returns null.
const propertiesRegexp = /([a-z_0-9]+)\s*:\s*([a-z_0-9]+(?:<[\s\S]+?>)?)/ig;
// Maps WGSL type names (both templated and abbreviated spellings) to the
// node system's type names. Used by parse() to resolve parameter and
// return types of user-provided WGSL functions.
const wgslTypeLib$1 = {
'f32': 'float',
'i32': 'int',
'u32': 'uint',
'bool': 'bool',
'vec2<f32>': 'vec2',
'vec2<i32>': 'ivec2',
'vec2<u32>': 'uvec2',
'vec2<bool>': 'bvec2',
'vec2f': 'vec2',
'vec2i': 'ivec2',
'vec2u': 'uvec2',
'vec2b': 'bvec2',
'vec3<f32>': 'vec3',
'vec3<i32>': 'ivec3',
'vec3<u32>': 'uvec3',
'vec3<bool>': 'bvec3',
'vec3f': 'vec3',
'vec3i': 'ivec3',
'vec3u': 'uvec3',
'vec3b': 'bvec3',
'vec4<f32>': 'vec4',
'vec4<i32>': 'ivec4',
'vec4<u32>': 'uvec4',
'vec4<bool>': 'bvec4',
'vec4f': 'vec4',
'vec4i': 'ivec4',
'vec4u': 'uvec4',
'vec4b': 'bvec4',
'mat2x2<f32>': 'mat2',
'mat2x2f': 'mat2',
'mat3x3<f32>': 'mat3',
'mat3x3f': 'mat3',
'mat4x4<f32>': 'mat4',
'mat4x4f': 'mat4',
'sampler': 'sampler',
'texture_1d': 'texture',
'texture_2d': 'texture',
'texture_2d_array': 'texture',
'texture_multisampled_2d': 'cubeTexture', // NOTE(review): 'cubeTexture' looks like a copy-paste slip — a multisampled 2D texture is not a cube texture; confirm intended node type ('texture'?)
'texture_depth_2d': 'depthTexture',
'texture_depth_multisampled_2d': 'depthTexture',
'texture_3d': 'texture3D',
'texture_cube': 'cubeTexture',
'texture_cube_array': 'cubeTexture',
'texture_storage_1d': 'storageTexture',
'texture_storage_2d': 'storageTexture',
'texture_storage_2d_array': 'storageTexture',
'texture_storage_3d': 'storageTexture'
};
/**
* Parses the given WGSL function source and extracts the function name,
* its inputs, return type and body.
*
* @param {String} source - The WGSL function source.
* @return {Object} An object with `type`, `inputs`, `name`, `inputsCode`,
* `blockCode` and `outputType` properties.
* @throws {Error} If the source does not match a WGSL function declaration.
*/
const parse = ( source ) => {

	source = source.trim();

	const declaration = source.match( declarationRegexp );

	if ( declaration === null || declaration.length !== 4 ) {

		throw new Error( 'FunctionNode: Function is not a WGSL code.' );

	}

	// collect the raw 'name : type' pairs from the parameter list

	const inputsCode = declaration[ 2 ];
	const propsMatches = [];

	let match;

	while ( ( match = propertiesRegexp.exec( inputsCode ) ) !== null ) {

		propsMatches.push( { name: match[ 1 ], type: match[ 2 ] } );

	}

	// resolve each WGSL parameter type to its node type

	const inputs = propsMatches.map( ( { name, type } ) => {

		let resolvedType = type;

		if ( resolvedType.startsWith( 'ptr' ) ) {

			resolvedType = 'pointer';

		} else {

			if ( resolvedType.startsWith( 'texture' ) ) {

				// strip template arguments, e.g. 'texture_2d<f32>' -> 'texture_2d'
				resolvedType = type.split( '<' )[ 0 ];

			}

			resolvedType = wgslTypeLib$1[ resolvedType ];

		}

		return new NodeFunctionInput( resolvedType, name );

	} );

	const blockCode = source.substring( declaration[ 0 ].length );
	const outputType = declaration[ 3 ] || 'void';
	const name = declaration[ 1 ] !== undefined ? declaration[ 1 ] : '';
	const type = wgslTypeLib$1[ outputType ] || outputType;

	return {
		type,
		inputs,
		name,
		inputsCode,
		blockCode,
		outputType
	};

};
/**
* This class represents a WGSL node function.
*
* @augments NodeFunction
*/
class WGSLNodeFunction extends NodeFunction {

	/**
	 * Constructs a new WGSL node function.
	 *
	 * @param {String} source - The WGSL source.
	 */
	constructor( source ) {

		const parsed = parse( source );

		super( parsed.type, parsed.inputs, parsed.name );

		this.inputsCode = parsed.inputsCode;
		this.blockCode = parsed.blockCode;
		this.outputType = parsed.outputType;

	}

	/**
	 * Returns the WGSL code of the node function.
	 *
	 * @param {String} [name=this.name] - The function's name.
	 * @return {String} The shader code.
	 */
	getCode( name = this.name ) {

		const returnSnippet = this.outputType === 'void' ? '' : '-> ' + this.outputType;

		return `fn ${ name } ( ${ this.inputsCode.trim() } ) ${ returnSnippet }` + this.blockCode;

	}

}
/**
* A WGSL node parser.
*
* @augments NodeParser
*/
class WGSLNodeParser extends NodeParser {

	/**
	 * Parses the given WGSL code and returns a node function.
	 *
	 * @param {String} source - The WGSL code.
	 * @return {WGSLNodeFunction} A node function.
	 */
	parseFunction( source ) {

		const nodeFunction = new WGSLNodeFunction( source );

		return nodeFunction;

	}

}
// GPUShaderStage is not defined in environments without WebGPU support. Note
// that in browsers without WebGPU, `self` exists but `self.GPUShaderStage` is
// undefined — the previous check only tested for `self` and therefore left
// this constant undefined in that case. The fallback values match the
// constants defined by the WebGPU specification.
const GPUShaderStage = ( typeof self !== 'undefined' && self.GPUShaderStage ) ? self.GPUShaderStage : { VERTEX: 1, FRAGMENT: 2, COMPUTE: 4 };
// Maps node access modifiers to their WGSL access qualifier spellings.
const accessNames = {
[ NodeAccess.READ_ONLY ]: 'read',
[ NodeAccess.WRITE_ONLY ]: 'write',
[ NodeAccess.READ_WRITE ]: 'read_write'
};
// Maps the three.js texture wrapping constants to the short names used when
// composing generated WGSL wrap-function identifiers (see generateWrapFunction()).
const wrapNames = {
[ RepeatWrapping ]: 'repeat',
[ ClampToEdgeWrapping ]: 'clamp',
[ MirroredRepeatWrapping ]: 'mirror'
};
// Maps shader stage names to GPUShaderStage bit flags. The literal fallbacks
// are used when GPUShaderStage is undefined and match the values defined by
// the WebGPU specification.
const gpuShaderStageLib = {
'vertex': GPUShaderStage ? GPUShaderStage.VERTEX : 1,
'fragment': GPUShaderStage ? GPUShaderStage.FRAGMENT : 2,
'compute': GPUShaderStage ? GPUShaderStage.COMPUTE : 4
};
// Capability flags of the WGSL backend queried by the node builder
// infrastructure (WGSL has no swizzle assignment, unlike GLSL).
const supports = {
instance: true,
swizzleAssign: false,
storageBuffer: true
};
// Operators without a WGSL equivalent, mapped to polyfill function names.
const wgslFnOpLib = {
'^^': 'tsl_xor'
};
// Maps node system type names to their WGSL type spellings
// (inverse direction of wgslTypeLib$1 above).
const wgslTypeLib = {
float: 'f32',
int: 'i32',
uint: 'u32',
bool: 'bool',
color: 'vec3<f32>',
vec2: 'vec2<f32>',
ivec2: 'vec2<i32>',
uvec2: 'vec2<u32>',
bvec2: 'vec2<bool>',
vec3: 'vec3<f32>',
ivec3: 'vec3<i32>',
uvec3: 'vec3<u32>',
bvec3: 'vec3<bool>',
vec4: 'vec4<f32>',
ivec4: 'vec4<i32>',
uvec4: 'vec4<u32>',
bvec4: 'vec4<bool>',
mat2: 'mat2x2<f32>',
mat3: 'mat3x3<f32>',
mat4: 'mat4x4<f32>'
};
// Cache of generated wrap-function code nodes, keyed by function name
// (populated by generateWrapFunction()).
const wgslCodeCache = {};
// WGSL implementations (as CodeNodes) for operations that have no native
// WGSL counterpart; included into shaders on demand via _include().
const wgslPolyfill = {
tsl_xor: new CodeNode( 'fn tsl_xor( a : bool, b : bool ) -> bool { return ( a || b ) && !( a && b ); }' ),
mod_float: new CodeNode( 'fn tsl_mod_float( x : f32, y : f32 ) -> f32 { return x - y * floor( x / y ); }' ),
mod_vec2: new CodeNode( 'fn tsl_mod_vec2( x : vec2f, y : vec2f ) -> vec2f { return x - y * floor( x / y ); }' ),
mod_vec3: new CodeNode( 'fn tsl_mod_vec3( x : vec3f, y : vec3f ) -> vec3f { return x - y * floor( x / y ); }' ),
mod_vec4: new CodeNode( 'fn tsl_mod_vec4( x : vec4f, y : vec4f ) -> vec4f { return x - y * floor( x / y ); }' ),
equals_bool: new CodeNode( 'fn tsl_equals_bool( a : bool, b : bool ) -> bool { return a == b; }' ),
equals_bvec2: new CodeNode( 'fn tsl_equals_bvec2( a : vec2f, b : vec2f ) -> vec2<bool> { return vec2<bool>( a.x == b.x, a.y == b.y ); }' ),
equals_bvec3: new CodeNode( 'fn tsl_equals_bvec3( a : vec3f, b : vec3f ) -> vec3<bool> { return vec3<bool>( a.x == b.x, a.y == b.y, a.z == b.z ); }' ),
equals_bvec4: new CodeNode( 'fn tsl_equals_bvec4( a : vec4f, b : vec4f ) -> vec4<bool> { return vec4<bool>( a.x == b.x, a.y == b.y, a.z == b.z, a.w == b.w ); }' ),
repeatWrapping_float: new CodeNode( 'fn tsl_repeatWrapping_float( coord: f32 ) -> f32 { return fract( coord ); }' ),
mirrorWrapping_float: new CodeNode( 'fn tsl_mirrorWrapping_float( coord: f32 ) -> f32 { let mirrored = fract( coord * 0.5 ) * 2.0; return 1.0 - abs( 1.0 - mirrored ); }' ),
clampWrapping_float: new CodeNode( 'fn tsl_clampWrapping_float( coord: f32 ) -> f32 { return clamp( coord, 0.0, 1.0 ); }' ),
// manual bilinear (biquadratic) filtering via four textureLoad() taps;
// used for texture types that cannot be sampled with a filtering sampler
biquadraticTexture: new CodeNode( /* wgsl */`
fn tsl_biquadraticTexture( map : texture_2d<f32>, coord : vec2f, iRes : vec2u, level : u32 ) -> vec4f {
let res = vec2f( iRes );
let uvScaled = coord * res;
let uvWrapping = ( ( uvScaled % res ) + res ) % res;
// https://www.shadertoy.com/view/WtyXRy
let uv = uvWrapping - 0.5;
let iuv = floor( uv );
let f = fract( uv );
let rg1 = textureLoad( map, vec2u( iuv + vec2( 0.5, 0.5 ) ) % iRes, level );
let rg2 = textureLoad( map, vec2u( iuv + vec2( 1.5, 0.5 ) ) % iRes, level );
let rg3 = textureLoad( map, vec2u( iuv + vec2( 0.5, 1.5 ) ) % iRes, level );
let rg4 = textureLoad( map, vec2u( iuv + vec2( 1.5, 1.5 ) ) % iRes, level );
return mix( mix( rg1, rg2, f.x ), mix( rg3, rg4, f.x ), f.y );
}
` )
};
// Maps node system method names to their WGSL counterparts — either a
// WGSL built-in or one of the tsl_* polyfills from wgslPolyfill.
const wgslMethods = {
dFdx: 'dpdx',
dFdy: '- dpdy', // negated — presumably compensates for the flipped y-axis relative to WebGL; confirm
mod_float: 'tsl_mod_float',
mod_vec2: 'tsl_mod_vec2',
mod_vec3: 'tsl_mod_vec3',
mod_vec4: 'tsl_mod_vec4',
equals_bool: 'tsl_equals_bool',
equals_bvec2: 'tsl_equals_bvec2',
equals_bvec3: 'tsl_equals_bvec3',
equals_bvec4: 'tsl_equals_bvec4',
inversesqrt: 'inverseSqrt',
bitcast: 'bitcast<f32>'
};
// WebGPU issue: does not support pow() with negative base on Windows.
// On Windows user agents, pow() is routed through polyfills that evaluate
// -pow( -a, b ) for non-positive bases; the vector variants delegate
// component-wise to tsl_pow_float.
if ( typeof navigator !== 'undefined' && /Windows/g.test( navigator.userAgent ) ) {
wgslPolyfill.pow_float = new CodeNode( 'fn tsl_pow_float( a : f32, b : f32 ) -> f32 { return select( -pow( -a, b ), pow( a, b ), a > 0.0 ); }' );
wgslPolyfill.pow_vec2 = new CodeNode( 'fn tsl_pow_vec2( a : vec2f, b : vec2f ) -> vec2f { return vec2f( tsl_pow_float( a.x, b.x ), tsl_pow_float( a.y, b.y ) ); }', [ wgslPolyfill.pow_float ] );
wgslPolyfill.pow_vec3 = new CodeNode( 'fn tsl_pow_vec3( a : vec3f, b : vec3f ) -> vec3f { return vec3f( tsl_pow_float( a.x, b.x ), tsl_pow_float( a.y, b.y ), tsl_pow_float( a.z, b.z ) ); }', [ wgslPolyfill.pow_float ] );
wgslPolyfill.pow_vec4 = new CodeNode( 'fn tsl_pow_vec4( a : vec4f, b : vec4f ) -> vec4f { return vec4f( tsl_pow_float( a.x, b.x ), tsl_pow_float( a.y, b.y ), tsl_pow_float( a.z, b.z ), tsl_pow_float( a.w, b.w ) ); }', [ wgslPolyfill.pow_float ] );
wgslMethods.pow_float = 'tsl_pow_float';
wgslMethods.pow_vec2 = 'tsl_pow_vec2';
wgslMethods.pow_vec3 = 'tsl_pow_vec3';
wgslMethods.pow_vec4 = 'tsl_pow_vec4';
}
// Shader-wide diagnostic directives prepended to generated WGSL.
let diagnostics = '';
// Disable the derivative_uniformity diagnostic everywhere except on
// Firefox/Deno — presumably those runtimes reject the directive; confirm.
if ( ( typeof navigator !== 'undefined' && /Firefox|Deno/g.test( navigator.userAgent ) ) !== true ) {
diagnostics += 'diagnostic( off, derivative_uniformity );\n';
}
/**
* A node builder targeting WGSL.
*
* This module generates WGSL shader code from node materials and also
* generates the respective bindings and vertex buffer definitions. These
* data are later used by the renderer to create render and compute pipelines
* for render objects.
*
* @augments NodeBuilder
*/
class WGSLNodeBuilder extends NodeBuilder {
/**
* Constructs a new WGSL node builder.
*
* @param {Object3D} object - The 3D object.
* @param {Renderer} renderer - The renderer.
*/
constructor( object, renderer ) {
super( object, renderer, new WGSLNodeParser() );
/**
* A dictionary that holds for each shader stage ('vertex', 'fragment', 'compute')
* another dictionary which manages UBOs per group ('render','frame','object').
*
* @type {Object<String,Object<String,NodeUniformsGroup>>}
*/
this.uniformGroups = {};
/**
* A dictionary that holds for each shader stage a Map of builtins.
*
* @type {Object<String,Map<String,Object>>}
*/
this.builtins = {};
/**
* A dictionary that holds for each shader stage a Set of directives.
*
* @type {Object<String,Set<String>>}
*/
this.directives = {};
/**
* A map for managing scope arrays. Only relevant for when using
* {@link module:WorkgroupInfoNode} in context of compute shaders.
*
* @type {Map<String,Object>}
*/
this.scopedArrays = new Map();
}
/**
* Checks if the given texture requires a manual conversion to the working color space.
*
* @param {Texture} texture - The texture to check.
* @return {Boolean} Whether the given texture requires a conversion to working color space or not.
*/
needsToWorkingColorSpace( texture ) {
return texture.isVideoTexture === true && texture.colorSpace !== NoColorSpace;
}
/**
* Generates the WGSL snippet for sampled textures.
*
* @private
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The WGSL snippet.
*/
_generateTextureSample( texture, textureProperty, uvSnippet, depthSnippet, shaderStage = this.shaderStage ) {
if ( shaderStage === 'fragment' ) {
if ( depthSnippet ) {
return `textureSample( ${ textureProperty }, ${ textureProperty }_sampler, ${ uvSnippet }, ${ depthSnippet } )`;
} else {
return `textureSample( ${ textureProperty }, ${ textureProperty }_sampler, ${ uvSnippet } )`;
}
} else if ( this.isFilteredTexture( texture ) ) {
return this.generateFilteredTexture( texture, textureProperty, uvSnippet );
} else {
return this.generateTextureLod( texture, textureProperty, uvSnippet, depthSnippet, '0' );
}
}
/**
* Generates the WGSL snippet when sampling video textures.
*
* @private
* @param {String} textureProperty - The name of the video texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The WGSL snippet.
*/
_generateVideoSample( textureProperty, uvSnippet, shaderStage = this.shaderStage ) {
if ( shaderStage === 'fragment' ) {
return `textureSampleBaseClampToEdge( ${ textureProperty }, ${ textureProperty }_sampler, vec2<f32>( ${ uvSnippet }.x, 1.0 - ${ uvSnippet }.y ) )`;
} else {
console.error( `WebGPURenderer: THREE.VideoTexture does not support ${ shaderStage } shader.` );
}
}
/**
* Generates the WGSL snippet when sampling textures with explicit mip level.
*
* @private
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String} levelSnippet - A WGSL snippet that represents the mip level, with level 0 containing a full size version of the texture.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The WGSL snippet.
*/
_generateTextureSampleLevel( texture, textureProperty, uvSnippet, levelSnippet, depthSnippet, shaderStage = this.shaderStage ) {
if ( ( shaderStage === 'fragment' || shaderStage === 'compute' ) && this.isUnfilterable( texture ) === false ) {
return `textureSampleLevel( ${ textureProperty }, ${ textureProperty }_sampler, ${ uvSnippet }, ${ levelSnippet } )`;
} else if ( this.isFilteredTexture( texture ) ) {
return this.generateFilteredTexture( texture, textureProperty, uvSnippet, levelSnippet );
} else {
return this.generateTextureLod( texture, textureProperty, uvSnippet, depthSnippet, levelSnippet );
}
}
/**
* Generates a wrap function used in context of textures.
*
* @param {Texture} texture - The texture to generate the function for.
* @return {String} The name of the generated function.
*/
generateWrapFunction( texture ) {
const functionName = `tsl_coord_${ wrapNames[ texture.wrapS ] }S_${ wrapNames[ texture.wrapT ] }_${texture.isData3DTexture ? '3d' : '2d'}T`;
let nodeCode = wgslCodeCache[ functionName ];
if ( nodeCode === undefined ) {
const includes = [];
// For 3D textures, use vec3f; for texture arrays, keep vec2f since array index is separate
const coordType = texture.isData3DTexture ? 'vec3f' : 'vec2f';
let code = `fn ${functionName}( coord : ${coordType} ) -> ${coordType} {\n\n\treturn ${coordType}(\n`;
const addWrapSnippet = ( wrap, axis ) => {
if ( wrap === RepeatWrapping ) {
includes.push( wgslPolyfill.repeatWrapping_float );
code += `\t\ttsl_repeatWrapping_float( coord.${ axis } )`;
} else if ( wrap === ClampToEdgeWrapping ) {
includes.push( wgslPolyfill.clampWrapping_float );
code += `\t\ttsl_clampWrapping_float( coord.${ axis } )`;
} else if ( wrap === MirroredRepeatWrapping ) {
includes.push( wgslPolyfill.mirrorWrapping_float );
code += `\t\ttsl_mirrorWrapping_float( coord.${ axis } )`;
} else {
code += `\t\tcoord.${ axis }`;
console.warn( `WebGPURenderer: Unsupported texture wrap type "${ wrap }" for vertex shader.` );
}
};
addWrapSnippet( texture.wrapS, 'x' );
code += ',\n';
addWrapSnippet( texture.wrapT, 'y' );
if ( texture.isData3DTexture ) {
code += ',\n';
addWrapSnippet( texture.wrapR, 'z' );
}
code += '\n\t);\n\n}\n';
wgslCodeCache[ functionName ] = nodeCode = new CodeNode( code, includes );
}
nodeCode.build( this );
return functionName;
}
/**
* Generates the array declaration string.
*
* @param {String} type - The type.
* @param {Number?} [count] - The count.
* @return {String} The generated value as a shader string.
*/
generateArrayDeclaration( type, count ) {
return `array< ${ this.getType( type ) }, ${ count } >`;
}
/**
* Generates a WGSL variable that holds the texture dimension of the given texture.
* It also returns information about the number of layers (elements) of an arrayed
* texture as well as the cube face count of cube textures.
*
* @param {Texture} texture - The texture to generate the function for.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} levelSnippet - A WGSL snippet that represents the mip level, with level 0 containing a full size version of the texture.
* @return {String} The name of the dimension variable.
*/
generateTextureDimension( texture, textureProperty, levelSnippet ) {
// the dimension nodes are cached per texture and per mip level snippet
const textureData = this.getDataFromNode( texture, this.shaderStage, this.globalCache );
if ( textureData.dimensionsSnippet === undefined ) textureData.dimensionsSnippet = {};
let textureDimensionNode = textureData.dimensionsSnippet[ levelSnippet ];
if ( textureData.dimensionsSnippet[ levelSnippet ] === undefined ) {
let textureDimensionsParams;
let dimensionType;
const { primarySamples } = this.renderer.backend.utils.getTextureSampleData( texture );
const isMultisampled = primarySamples > 1;
if ( texture.isData3DTexture ) {
dimensionType = 'vec3<u32>';
} else {
// Regular 2D textures, depth textures, etc.
dimensionType = 'vec2<u32>';
}
// Build parameters string based on texture type and multisampling.
// Multisampled, video and storage textures must not pass a mip level
// to textureDimensions().
if ( isMultisampled || texture.isVideoTexture || texture.isStorageTexture ) {
textureDimensionsParams = textureProperty;
} else {
textureDimensionsParams = `${textureProperty}${levelSnippet ? `, u32( ${ levelSnippet } )` : ''}`;
}
textureDimensionNode = new VarNode( new ExpressionNode( `textureDimensions( ${ textureDimensionsParams } )`, dimensionType ) );
textureData.dimensionsSnippet[ levelSnippet ] = textureDimensionNode;
// arrayed and 3D textures additionally expose their layer count
if ( texture.isDataArrayTexture || texture.isData3DTexture ) {
textureData.arrayLayerCount = new VarNode(
new ExpressionNode(
`textureNumLayers(${textureProperty})`,
'u32'
)
);
}
// For cube textures, we know it's always 6 faces
if ( texture.isTextureCube ) {
textureData.cubeFaceCount = new VarNode(
new ExpressionNode( '6u', 'u32' )
);
}
}
return textureDimensionNode.build( this );
}
/**
* Generates the WGSL snippet for a manual filtered texture.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String} levelSnippet - A WGSL snippet that represents the mip level, with level 0 containing a full size version of the texture.
* @return {String} The WGSL snippet.
*/
generateFilteredTexture( texture, textureProperty, uvSnippet, levelSnippet = '0u' ) {
this._include( 'biquadraticTexture' );
const wrapFunction = this.generateWrapFunction( texture );
const textureDimension = this.generateTextureDimension( texture, textureProperty, levelSnippet );
return `tsl_biquadraticTexture( ${ textureProperty }, ${ wrapFunction }( ${ uvSnippet } ), ${ textureDimension }, u32( ${ levelSnippet } ) )`;
}
/**
* Generates the WGSL snippet for a texture lookup with explicit level-of-detail.
* Since it's a lookup, no sampling or filtering is applied.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [levelSnippet='0u'] - A WGSL snippet that represents the mip level, with level 0 containing a full size version of the texture.
* @return {String} The WGSL snippet.
*/
generateTextureLod( texture, textureProperty, uvSnippet, depthSnippet, levelSnippet = '0u' ) {
const wrapFunction = this.generateWrapFunction( texture );
const textureDimension = this.generateTextureDimension( texture, textureProperty, levelSnippet );
const vecType = texture.isData3DTexture ? 'vec3' : 'vec2';
const coordSnippet = `${vecType}<u32>(${wrapFunction}(${uvSnippet}) * ${vecType}<f32>(${textureDimension}))`;
return this.generateTextureLoad( texture, textureProperty, coordSnippet, depthSnippet, levelSnippet );
}
/**
* Generates the WGSL snippet that reads a single texel from a texture without sampling or filtering.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvIndexSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [levelSnippet='0u'] - A WGSL snippet that represents the mip level, with level 0 containing a full size version of the texture.
* @return {String} The WGSL snippet.
*/
generateTextureLoad( texture, textureProperty, uvIndexSnippet, depthSnippet, levelSnippet = '0u' ) {
if ( texture.isVideoTexture === true || texture.isStorageTexture === true ) {
return `textureLoad( ${ textureProperty }, ${ uvIndexSnippet } )`;
} else if ( depthSnippet ) {
return `textureLoad( ${ textureProperty }, ${ uvIndexSnippet }, ${ depthSnippet }, u32( ${ levelSnippet } ) )`;
} else {
return `textureLoad( ${ textureProperty }, ${ uvIndexSnippet }, u32( ${ levelSnippet } ) )`;
}
}
/**
* Generates the WGSL snippet that writes a single texel to a texture.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvIndexSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String} valueSnippet - A WGSL snippet that represent the new texel value.
* @return {String} The WGSL snippet.
*/
generateTextureStore( texture, textureProperty, uvIndexSnippet, valueSnippet ) {
return `textureStore( ${ textureProperty }, ${ uvIndexSnippet }, ${ valueSnippet } )`;
}
/**
* Returns `true` if the sampled values of the given texture should be compared against a reference value.
*
* @param {Texture} texture - The texture.
* @return {Boolean} Whether the sampled values of the given texture should be compared against a reference value or not.
*/
isSampleCompare( texture ) {
return texture.isDepthTexture === true && texture.compareFunction !== null;
}
/**
* Returns `true` if the given texture is unfilterable.
*
* @param {Texture} texture - The texture.
* @return {Boolean} Whether the given texture is unfilterable or not.
*/
isUnfilterable( texture ) {
return this.getComponentTypeFromTexture( texture ) !== 'float' ||
( ! this.isAvailable( 'float32Filterable' ) && texture.isDataTexture === true && texture.type === FloatType ) ||
( this.isSampleCompare( texture ) === false && texture.minFilter === NearestFilter && texture.magFilter === NearestFilter ) ||
this.renderer.backend.utils.getTextureSampleData( texture ).primarySamples > 1;
}
/**
* Generates the WGSL snippet for sampling/loading the given texture.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The WGSL snippet.
*/
generateTexture( texture, textureProperty, uvSnippet, depthSnippet, shaderStage = this.shaderStage ) {
let snippet = null;
if ( texture.isVideoTexture === true ) {
snippet = this._generateVideoSample( textureProperty, uvSnippet, shaderStage );
} else if ( this.isUnfilterable( texture ) ) {
snippet = this.generateTextureLod( texture, textureProperty, uvSnippet, depthSnippet, '0', shaderStage );
} else {
snippet = this._generateTextureSample( texture, textureProperty, uvSnippet, depthSnippet, shaderStage );
}
return snippet;
}
/**
* Generates the WGSL snippet for sampling/loading the given texture using explicit gradients.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {Array<String>} gradSnippet - An array holding both gradient WGSL snippets.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The WGSL snippet.
*/
generateTextureGrad( texture, textureProperty, uvSnippet, gradSnippet, depthSnippet, shaderStage = this.shaderStage ) {
if ( shaderStage === 'fragment' ) {
// TODO handle i32 or u32 --> uvSnippet, array_index: A, ddx, ddy
return `textureSampleGrad( ${ textureProperty }, ${ textureProperty }_sampler, ${ uvSnippet }, ${ gradSnippet[ 0 ] }, ${ gradSnippet[ 1 ] } )`;
} else {
console.error( `WebGPURenderer: THREE.TextureNode.gradient() does not support ${ shaderStage } shader.` );
}
}
/**
* Generates the WGSL snippet for sampling a depth texture and comparing the sampled depth values
* against a reference value.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String} compareSnippet - A WGSL snippet that represents the reference value.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The WGSL snippet.
*/
generateTextureCompare( texture, textureProperty, uvSnippet, compareSnippet, depthSnippet, shaderStage = this.shaderStage ) {
if ( shaderStage === 'fragment' ) {
return `textureSampleCompare( ${ textureProperty }, ${ textureProperty }_sampler, ${ uvSnippet }, ${ compareSnippet } )`;
} else {
console.error( `WebGPURenderer: THREE.DepthTexture.compareFunction() does not support ${ shaderStage } shader.` );
}
}
/**
* Generates the WGSL snippet when sampling textures with explicit mip level.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String} levelSnippet - A WGSL snippet that represents the mip level, with level 0 containing a full size version of the texture.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The WGSL snippet.
*/
generateTextureLevel( texture, textureProperty, uvSnippet, levelSnippet, depthSnippet, shaderStage = this.shaderStage ) {
let snippet = null;
if ( texture.isVideoTexture === true ) {
snippet = this._generateVideoSample( textureProperty, uvSnippet, shaderStage );
} else {
snippet = this._generateTextureSampleLevel( texture, textureProperty, uvSnippet, levelSnippet, depthSnippet, shaderStage );
}
return snippet;
}
/**
* Generates the WGSL snippet when sampling textures with a bias to the mip level.
*
* @param {Texture} texture - The texture.
* @param {String} textureProperty - The name of the texture uniform in the shader.
* @param {String} uvSnippet - A WGSL snippet that represents texture coordinates used for sampling.
* @param {String} biasSnippet - A WGSL snippet that represents the bias to apply to the mip level before sampling.
* @param {String?} depthSnippet - A WGSL snippet that represents 0-based texture array index to sample.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The WGSL snippet.
*/
generateTextureBias( texture, textureProperty, uvSnippet, biasSnippet, depthSnippet, shaderStage = this.shaderStage ) {
if ( shaderStage === 'fragment' ) {
return `textureSampleBias( ${ textureProperty }, ${ textureProperty }_sampler, ${ uvSnippet }, ${ biasSnippet } )`;
} else {
console.error( `WebGPURenderer: THREE.TextureNode.biasNode does not support ${ shaderStage } shader.` );
}
}
/**
* Returns a WGSL snippet that represents the property name of the given node.
*
* @param {Node} node - The node.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The property name.
*/
getPropertyName( node, shaderStage = this.shaderStage ) {
if ( node.isNodeVarying === true && node.needsInterpolation === true ) {
if ( shaderStage === 'vertex' ) {
return `varyings.${ node.name }`;
}
} else if ( node.isNodeUniform === true ) {
const name = node.name;
const type = node.type;
if ( type === 'texture' || type === 'cubeTexture' || type === 'storageTexture' || type === 'texture3D' ) {
return name;
} else if ( type === 'buffer' || type === 'storageBuffer' || type === 'indirectStorageBuffer' ) {
if ( this.isCustomStruct( node ) ) {
return name;
}
return name + '.value';
} else {
return node.groupNode.name + '.' + name;
}
}
return super.getPropertyName( node );
}
/**
* Returns the output struct name.
*
* @return {String} The name of the output struct.
*/
getOutputStructName() {
return 'output';
}
/**
* Returns uniforms group count for the given shader stage.
*
* @private
* @param {String} shaderStage - The shader stage.
* @return {Number} The uniforms group count for the given shader stage.
*/
_getUniformGroupCount( shaderStage ) {
return Object.keys( this.uniforms[ shaderStage ] ).length;
}
/**
* Returns the native shader operator name for a given generic name.
*
* @param {String} op - The operator name to resolve.
* @return {String} The resolved operator name.
*/
getFunctionOperator( op ) {
const fnOp = wgslFnOpLib[ op ];
if ( fnOp !== undefined ) {
this._include( fnOp );
return fnOp;
}
return null;
}
/**
* Returns the node access for the given node and shader stage.
*
* @param {StorageTextureNode|StorageBufferNode} node - The storage node.
* @param {String} shaderStage - The shader stage.
* @return {String} The node access.
*/
getNodeAccess( node, shaderStage ) {
if ( shaderStage !== 'compute' )
return NodeAccess.READ_ONLY;
return node.access;
}
/**
* Returns a WGSL snippet representing the storage access.
*
* @param {StorageTextureNode|StorageBufferNode} node - The storage node.
* @param {String} shaderStage - The shader stage.
* @return {String} The WGSL snippet representing the storage access.
*/
getStorageAccess( node, shaderStage ) {
return accessNames[ this.getNodeAccess( node, shaderStage ) ];
}
/**
* This method is one of the more important ones since it's responsible
* for generating a matching binding instance for the given uniform node.
*
* These bindings are later used in the renderer to create bind groups
* and layouts.
*
* @param {UniformNode} node - The uniform node.
* @param {String} type - The node data type.
* @param {String} shaderStage - The shader stage.
* @param {String?} [name=null] - An optional uniform name.
* @return {NodeUniform} The node uniform object.
*/
getUniformFromNode( node, type, shaderStage, name = null ) {
const uniformNode = super.getUniformFromNode( node, type, shaderStage, name );
const nodeData = this.getDataFromNode( node, shaderStage, this.globalCache );
// Create the GPU-side binding only once per node; later calls reuse the cached one.
if ( nodeData.uniformGPU === undefined ) {
let uniformGPU;
const group = node.groupNode;
const groupName = group.name;
const bindings = this.getBindGroupArray( groupName, shaderStage );
if ( type === 'texture' || type === 'cubeTexture' || type === 'storageTexture' || type === 'texture3D' ) {
let texture = null;
const access = this.getNodeAccess( node, shaderStage );
// Pick the sampled-texture binding class matching the texture's dimensionality.
if ( type === 'texture' || type === 'storageTexture' ) {
texture = new NodeSampledTexture( uniformNode.name, uniformNode.node, group, access );
} else if ( type === 'cubeTexture' ) {
texture = new NodeSampledCubeTexture( uniformNode.name, uniformNode.node, group, access );
} else if ( type === 'texture3D' ) {
texture = new NodeSampledTexture3D( uniformNode.name, uniformNode.node, group, access );
}
texture.store = node.isStorageTextureNode === true;
texture.setVisibility( gpuShaderStageLib[ shaderStage ] );
// Filterable, non-storage textures in the fragment/compute stages get a companion sampler binding.
if ( ( shaderStage === 'fragment' || shaderStage === 'compute' ) && this.isUnfilterable( node.value ) === false && texture.store === false ) {
const sampler = new NodeSampler( `${ uniformNode.name }_sampler`, uniformNode.node, group );
sampler.setVisibility( gpuShaderStageLib[ shaderStage ] );
bindings.push( sampler, texture );
uniformGPU = [ sampler, texture ];
} else {
bindings.push( texture );
uniformGPU = [ texture ];
}
} else if ( type === 'buffer' || type === 'storageBuffer' || type === 'indirectStorageBuffer' ) {
// Uniform and storage buffers each become one dedicated buffer binding.
const bufferClass = type === 'buffer' ? NodeUniformBuffer : NodeStorageBuffer;
const buffer = new bufferClass( node, group );
buffer.setVisibility( gpuShaderStageLib[ shaderStage ] );
bindings.push( buffer );
uniformGPU = buffer;
// Buffers without an explicit name get a generated one based on the uniform id.
uniformNode.name = name ? name : 'NodeBuffer_' + uniformNode.id;
} else {
// Plain uniforms are collected into a shared uniforms-group binding per group name.
const uniformsStage = this.uniformGroups[ shaderStage ] || ( this.uniformGroups[ shaderStage ] = {} );
let uniformsGroup = uniformsStage[ groupName ];
if ( uniformsGroup === undefined ) {
uniformsGroup = new NodeUniformsGroup( groupName, group );
uniformsGroup.setVisibility( gpuShaderStageLib[ shaderStage ] );
uniformsStage[ groupName ] = uniformsGroup;
bindings.push( uniformsGroup );
}
uniformGPU = this.getNodeUniform( uniformNode, type );
uniformsGroup.addUniform( uniformGPU );
}
nodeData.uniformGPU = uniformGPU;
}
return uniformNode;
}
/**
* This method should be used whenever builtins are required in nodes.
* The internal builtins data structure will make sure builtins are
* defined in the WGSL source.
*
* @param {String} name - The builtin name.
* @param {String} property - The property name.
* @param {String} type - The node data type.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {String} The property name.
*/
getBuiltin( name, property, type, shaderStage = this.shaderStage ) {
const map = this.builtins[ shaderStage ] || ( this.builtins[ shaderStage ] = new Map() );
if ( map.has( name ) === false ) {
map.set( name, {
name,
property,
type
} );
}
return property;
}
/**
* Returns `true` if the given builtin is defined in the given shader stage.
*
* @param {String} name - The builtin name.
* @param {String} [shaderStage=this.shaderStage] - The shader stage this code snippet is generated for.
* @return {Boolean} Whether the given builtin is defined in the given shader stage or not.
*/
hasBuiltin( name, shaderStage = this.shaderStage ) {
return ( this.builtins[ shaderStage ] !== undefined && this.builtins[ shaderStage ].has( name ) );
}
/**
* Returns the vertex index builtin.
*
* @return {String} The vertex index.
*/
getVertexIndex() {
if ( this.shaderStage === 'vertex' ) {
return this.getBuiltin( 'vertex_index', 'vertexIndex', 'u32', 'attribute' );
}
return 'vertexIndex';
}
/**
* Builds the given shader node.
*
* @param {ShaderNodeInternal} shaderNode - The shader node.
* @return {String} The WGSL function code.
*/
buildFunctionCode( shaderNode ) {
const layout = shaderNode.layout;
const flowData = this.flowShaderNode( shaderNode );
// Translate the layout inputs into WGSL parameter declarations ("name : type").
const parameters = [];
for ( const input of layout.inputs ) {
parameters.push( input.name + ' : ' + this.getType( input.type ) );
}
//
let code = `fn ${ layout.name }( ${ parameters.join( ', ' ) } ) -> ${ this.getType( layout.type ) } {
${ flowData.vars }
${ flowData.code }
`;
// Only emit a return statement when the flow produced a result expression.
if ( flowData.result ) {
code += `\treturn ${ flowData.result };\n`;
}
code += '\n}\n';
//
return code;
}
/**
* Returns the instance index builtin.
*
* @return {String} The instance index.
*/
getInstanceIndex() {
if ( this.shaderStage === 'vertex' ) {
return this.getBuiltin( 'instance_index', 'instanceIndex', 'u32', 'attribute' );
}
return 'instanceIndex';
}
/**
* Returns the invocation local index builtin.
*
* @return {String} The invocation local index.
*/
getInvocationLocalIndex() {
return this.getBuiltin( 'local_invocation_index', 'invocationLocalIndex', 'u32', 'attribute' );
}
/**
* Returns the subgroup size builtin.
*
* @return {String} The subgroup size.
*/
getSubgroupSize() {
this.enableSubGroups();
return this.getBuiltin( 'subgroup_size', 'subgroupSize', 'u32', 'attribute' );
}
/**
* Returns the invocation subgroup index builtin.
*
* @return {String} The invocation subgroup index.
*/
getInvocationSubgroupIndex() {
this.enableSubGroups();
return this.getBuiltin( 'subgroup_invocation_id', 'invocationSubgroupIndex', 'u32', 'attribute' );
}
/**
* Returns the subgroup index builtin.
*
* @return {String} The subgroup index.
*/
getSubgroupIndex() {
this.enableSubGroups();
return this.getBuiltin( 'subgroup_id', 'subgroupIndex', 'u32', 'attribute' );
}
/**
* Overwritten as a NOP since this method is intended for the WebGL 2 backend.
*
* @return {null} Null.
*/
getDrawIndex() {
return null;
}
/**
* Returns the front facing builtin.
*
* @return {String} The front facing builtin.
*/
getFrontFacing() {
return this.getBuiltin( 'front_facing', 'isFront', 'bool' );
}
/**
* Returns the frag coord builtin.
*
* @return {String} The frag coord builtin.
*/
getFragCoord() {
return this.getBuiltin( 'position', 'fragCoord', 'vec4<f32>' ) + '.xy';
}
/**
* Returns the frag depth builtin.
*
* @return {String} The frag depth builtin.
*/
getFragDepth() {
return 'output.' + this.getBuiltin( 'frag_depth', 'depth', 'f32', 'output' );
}
/**
* Returns the clip distances builtin.
*
* @return {String} The clip distances builtin.
*/
getClipDistance() {
return 'varyings.hw_clip_distances';
}
/**
* Whether to flip texture data along its vertical axis or not.
*
* @return {Boolean} Returns always `false` in context of WGSL.
*/
isFlipY() {
return false;
}
/**
* Enables the given directive for the given shader stage.
*
* @param {String} name - The directive name.
* @param {String} [shaderStage=this.shaderStage] - The shader stage to enable the directive for.
*/
enableDirective( name, shaderStage = this.shaderStage ) {
const stage = this.directives[ shaderStage ] || ( this.directives[ shaderStage ] = new Set() );
stage.add( name );
}
/**
* Returns the directives of the given shader stage as a WGSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} A WGSL snippet that enables the directives of the given stage.
*/
getDirectives( shaderStage ) {
const snippets = [];
const directives = this.directives[ shaderStage ];
if ( directives !== undefined ) {
for ( const directive of directives ) {
snippets.push( `enable ${directive};` );
}
}
return snippets.join( '\n' );
}
/**
* Enables the 'subgroups' directive.
*/
enableSubGroups() {
this.enableDirective( 'subgroups' );
}
/**
* Enables the 'subgroups-f16' directive.
*/
enableSubgroupsF16() {
this.enableDirective( 'subgroups-f16' );
}
/**
* Enables the 'clip_distances' directive.
*/
enableClipDistances() {
this.enableDirective( 'clip_distances' );
}
/**
* Enables the 'f16' directive.
*/
enableShaderF16() {
this.enableDirective( 'f16' );
}
/**
* Enables the 'dual_source_blending' directive.
*/
enableDualSourceBlending() {
this.enableDirective( 'dual_source_blending' );
}
/**
* Enables hardware clipping.
*
* @param {Number} planeCount - The clipping plane count.
*/
enableHardwareClipping( planeCount ) {
this.enableClipDistances();
this.getBuiltin( 'clip_distances', 'hw_clip_distances', `array<f32, ${ planeCount } >`, 'vertex' );
}
/**
* Returns the builtins of the given shader stage as a WGSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} A WGSL snippet that represents the builtins of the given stage.
*/
getBuiltins( shaderStage ) {
const snippets = [];
const builtins = this.builtins[ shaderStage ];
if ( builtins !== undefined ) {
for ( const { name, property, type } of builtins.values() ) {
snippets.push( `@builtin( ${name} ) ${property} : ${type}` );
}
}
return snippets.join( ',\n\t' );
}
/**
* This method should be used when a new scoped buffer is used in context of
* compute shaders. It adds the array to the internal data structure which is
* later used to generate the respective WGSL.
*
* @param {String} name - The array name.
* @param {String} scope - The scope.
* @param {String} bufferType - The buffer type.
* @param {Number} bufferCount - The buffer count.
* @return {String} The array name.
*/
getScopedArray( name, scope, bufferType, bufferCount ) {
if ( this.scopedArrays.has( name ) === false ) {
this.scopedArrays.set( name, {
name,
scope,
bufferType,
bufferCount
} );
}
return name;
}
/**
* Returns the scoped arrays of the given shader stage as a WGSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String|undefined} The WGSL snippet that defines the scoped arrays.
* Returns `undefined` when used in the vertex or fragment stage.
*/
getScopedArrays( shaderStage ) {
if ( shaderStage !== 'compute' ) {
return;
}
const snippets = [];
for ( const { name, scope, bufferType, bufferCount } of this.scopedArrays.values() ) {
const type = this.getType( bufferType );
snippets.push( `var<${scope}> ${name}: array< ${type}, ${bufferCount} >;` );
}
return snippets.join( '\n' );
}
/**
* Returns the shader attributes of the given shader stage as a WGSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The WGSL snippet that defines the shader attributes.
*/
getAttributes( shaderStage ) {
const snippets = [];
if ( shaderStage === 'compute' ) {
this.getBuiltin( 'global_invocation_id', 'globalId', 'vec3<u32>', 'attribute' );
this.getBuiltin( 'workgroup_id', 'workgroupId', 'vec3<u32>', 'attribute' );
this.getBuiltin( 'local_invocation_id', 'localId', 'vec3<u32>', 'attribute' );
this.getBuiltin( 'num_workgroups', 'numWorkgroups', 'vec3<u32>', 'attribute' );
if ( this.renderer.hasFeature( 'subgroups' ) ) {
this.enableDirective( 'subgroups', shaderStage );
this.getBuiltin( 'subgroup_size', 'subgroupSize', 'u32', 'attribute' );
}
}
if ( shaderStage === 'vertex' || shaderStage === 'compute' ) {
const builtins = this.getBuiltins( 'attribute' );
if ( builtins ) snippets.push( builtins );
const attributes = this.getAttributesArray();
for ( let index = 0, length = attributes.length; index < length; index ++ ) {
const attribute = attributes[ index ];
const name = attribute.name;
const type = this.getType( attribute.type );
snippets.push( `@location( ${index} ) ${ name } : ${ type }` );
}
}
return snippets.join( ',\n\t' );
}
/**
* Returns the members of the given struct type node as a WGSL string.
*
* @param {StructTypeNode} struct - The struct type node.
* @return {String} The WGSL snippet that defines the struct members.
*/
getStructMembers( struct ) {
const snippets = [];
for ( const member of struct.members ) {
const prefix = struct.output ? '@location( ' + member.index + ' ) ' : '';
let type = this.getType( member.type );
if ( member.atomic ) {
type = 'atomic< ' + type + ' >';
}
snippets.push( `\t${ prefix + member.name } : ${ type }` );
}
return snippets.join( ',\n' );
}
/**
* Returns the structs of the given shader stage as a WGSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The WGSL snippet that defines the structs.
*/
getStructs( shaderStage ) {
let result = '';
const structs = this.structs[ shaderStage ];
if ( structs.length > 0 ) {
const snippets = [];
for ( const struct of structs ) {
let snippet = `struct ${ struct.name } {\n`;
snippet += this.getStructMembers( struct );
snippet += '\n};';
snippets.push( snippet );
}
result = '\n' + snippets.join( '\n\n' ) + '\n';
}
return result;
}
/**
* Returns a WGSL string representing a variable.
*
* @param {String} type - The variable's type.
* @param {String} name - The variable's name.
* @param {Number?} [count=null] - The array length.
* @return {String} The WGSL snippet that defines a variable.
*/
getVar( type, name, count = null ) {
let snippet = `var ${ name } : `;
if ( count !== null ) {
snippet += this.generateArrayDeclaration( type, count );
} else {
snippet += this.getType( type );
}
return snippet;
}
/**
* Returns the variables of the given shader stage as a WGSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The WGSL snippet that defines the variables.
*/
getVars( shaderStage ) {
const snippets = [];
const vars = this.vars[ shaderStage ];
if ( vars !== undefined ) {
for ( const variable of vars ) {
snippets.push( `\t${ this.getVar( variable.type, variable.name, variable.count ) };` );
}
}
return `\n${ snippets.join( '\n' ) }\n`;
}
/**
* Returns the varyings of the given shader stage as a WGSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The WGSL snippet that defines the varyings.
*/
getVaryings( shaderStage ) {
const snippets = [];
if ( shaderStage === 'vertex' ) {
this.getBuiltin( 'position', 'Vertex', 'vec4<f32>', 'vertex' );
}
if ( shaderStage === 'vertex' || shaderStage === 'fragment' ) {
const varyings = this.varyings;
const vars = this.vars[ shaderStage ];
for ( let index = 0; index < varyings.length; index ++ ) {
const varying = varyings[ index ];
if ( varying.needsInterpolation ) {
let attributesSnippet = `@location( ${index} )`;
if ( /^(int|uint|ivec|uvec)/.test( varying.type ) ) {
attributesSnippet += ' @interpolate( flat )';
}
snippets.push( `${ attributesSnippet } ${ varying.name } : ${ this.getType( varying.type ) }` );
} else if ( shaderStage === 'vertex' && vars.includes( varying ) === false ) {
vars.push( varying );
}
}
}
const builtins = this.getBuiltins( shaderStage );
if ( builtins ) snippets.push( builtins );
const code = snippets.join( ',\n\t' );
return shaderStage === 'vertex' ? this._getWGSLStruct( 'VaryingsStruct', '\t' + code ) : code;
}
/**
 * Returns `true` if the given uniform represents a storage buffer attribute
 * with a user-defined struct type node (such buffers are bound with their
 * struct type directly instead of a generated wrapper struct).
 *
 * @param {NodeUniform} nodeUniform - The node uniform to check.
 * @return {Boolean} Whether the uniform uses a custom struct or not.
 */
isCustomStruct( nodeUniform ) {
return nodeUniform.value.isStorageBufferAttribute && nodeUniform.node.structTypeNode !== null;
}
/**
* Returns the uniforms of the given shader stage as a WGSL string.
*
* @param {String} shaderStage - The shader stage.
* @return {String} The WGSL snippet that defines the uniforms.
*/
getUniforms( shaderStage ) {
const uniforms = this.uniforms[ shaderStage ];
const bindingSnippets = [];
const bufferSnippets = [];
const structSnippets = [];
const uniformGroups = {};
for ( const uniform of uniforms ) {
const groupName = uniform.groupNode.name;
// Binding/group indices are tracked per uniform group; 'binding' is incremented in place below.
const uniformIndexes = this.bindingsIndexes[ groupName ];
if ( uniform.type === 'texture' || uniform.type === 'cubeTexture' || uniform.type === 'storageTexture' || uniform.type === 'texture3D' ) {
const texture = uniform.node.value;
// Filterable non-storage textures in fragment/compute stages also declare a sampler binding.
if ( ( shaderStage === 'fragment' || shaderStage === 'compute' ) && this.isUnfilterable( texture ) === false && uniform.node.isStorageTextureNode !== true ) {
if ( this.isSampleCompare( texture ) ) {
bindingSnippets.push( `@binding( ${ uniformIndexes.binding ++ } ) @group( ${ uniformIndexes.group } ) var ${ uniform.name }_sampler : sampler_comparison;` );
} else {
bindingSnippets.push( `@binding( ${ uniformIndexes.binding ++ } ) @group( ${ uniformIndexes.group } ) var ${ uniform.name }_sampler : sampler;` );
}
}
let textureType;
let multisampled = '';
const { primarySamples } = this.renderer.backend.utils.getTextureSampleData( texture );
if ( primarySamples > 1 ) {
multisampled = '_multisampled';
}
// Resolve the WGSL texture type from the texture's kind.
if ( texture.isCubeTexture === true ) {
textureType = 'texture_cube<f32>';
} else if ( texture.isDataArrayTexture === true || texture.isCompressedArrayTexture === true ) {
textureType = 'texture_2d_array<f32>';
} else if ( texture.isDepthTexture === true ) {
textureType = `texture_depth${multisampled}_2d`;
} else if ( texture.isVideoTexture === true ) {
textureType = 'texture_external';
} else if ( texture.isData3DTexture === true ) {
textureType = 'texture_3d<f32>';
} else if ( uniform.node.isStorageTextureNode === true ) {
const format = getFormat( texture );
const access = this.getStorageAccess( uniform.node, shaderStage );
textureType = `texture_storage_2d<${ format }, ${ access }>`;
} else {
// Regular 2D texture; the component prefix selects f32/i32/u32 sampling.
const componentPrefix = this.getComponentTypeFromTexture( texture ).charAt( 0 );
textureType = `texture${ multisampled }_2d<${ componentPrefix }32>`;
}
bindingSnippets.push( `@binding( ${ uniformIndexes.binding ++ } ) @group( ${ uniformIndexes.group } ) var ${ uniform.name } : ${ textureType };` );
} else if ( uniform.type === 'buffer' || uniform.type === 'storageBuffer' || uniform.type === 'indirectStorageBuffer' ) {
const bufferNode = uniform.node;
const bufferType = this.getType( bufferNode.getNodeType( this ) );
const bufferCount = bufferNode.bufferCount;
// A fixed array length is only appended for 'buffer' uniforms.
const bufferCountSnippet = bufferCount > 0 && uniform.type === 'buffer' ? ', ' + bufferCount : '';
const bufferAccessMode = bufferNode.isStorageBufferNode ? `storage, ${ this.getStorageAccess( bufferNode, shaderStage ) }` : 'uniform';
if ( this.isCustomStruct( uniform ) ) {
// Custom structs bind their struct type directly, without a wrapper.
bufferSnippets.push( `@binding( ${ uniformIndexes.binding ++ } ) @group( ${ uniformIndexes.group } ) var<${ bufferAccessMode }> ${ uniform.name } : ${ bufferType };` );
} else {
// Generated buffers are wrapped in a struct with a single 'value' array member.
const bufferTypeSnippet = bufferNode.isAtomic ? `atomic<${ bufferType }>` : `${ bufferType }`;
const bufferSnippet = `\tvalue : array< ${ bufferTypeSnippet }${ bufferCountSnippet } >`;
bufferSnippets.push( this._getWGSLStructBinding( uniform.name, bufferSnippet, bufferAccessMode, uniformIndexes.binding ++, uniformIndexes.group ) );
}
} else {
// Plain uniforms are collected per group and emitted as one struct binding below.
const vectorType = this.getType( this.getVectorType( uniform.type ) );
const groupName = uniform.groupNode.name;
const group = uniformGroups[ groupName ] || ( uniformGroups[ groupName ] = {
index: uniformIndexes.binding ++,
id: uniformIndexes.group,
snippets: []
} );
group.snippets.push( `\t${ uniform.name } : ${ vectorType }` );
}
}
// Emit one struct binding per collected uniform group.
for ( const name in uniformGroups ) {
const group = uniformGroups[ name ];
structSnippets.push( this._getWGSLStructBinding( name, group.snippets.join( ',\n' ), 'uniform', group.index, group.id ) );
}
let code = bindingSnippets.join( '\n' );
code += bufferSnippets.join( '\n' );
code += structSnippets.join( '\n' );
return code;
}
/**
* Controls the code build of the shader stages.
*/
buildCode() {
// Render pipelines build fragment+vertex stages; compute pipelines a single compute stage.
const shadersData = this.material !== null ? { fragment: {}, vertex: {} } : { compute: {} };
this.sortBindingGroups();
for ( const shaderStage in shadersData ) {
// this.shaderStage acts as implicit context for the getters below.
this.shaderStage = shaderStage;
const stageData = shadersData[ shaderStage ];
stageData.uniforms = this.getUniforms( shaderStage );
stageData.attributes = this.getAttributes( shaderStage );
stageData.varyings = this.getVaryings( shaderStage );
stageData.structs = this.getStructs( shaderStage );
stageData.vars = this.getVars( shaderStage );
stageData.codes = this.getCodes( shaderStage );
stageData.directives = this.getDirectives( shaderStage );
stageData.scopedArrays = this.getScopedArrays( shaderStage );
//
let flow = '// code\n\n';
flow += this.flowCode[ shaderStage ];
const flowNodes = this.flowNodes[ shaderStage ];
// The last flow node produces the stage's result value.
const mainNode = flowNodes[ flowNodes.length - 1 ];
const outputNode = mainNode.outputNode;
const isOutputStruct = ( outputNode !== undefined && outputNode.isOutputStructNode === true );
for ( const node of flowNodes ) {
const flowSlotData = this.getFlowData( node/*, shaderStage*/ );
const slotName = node.name;
if ( slotName ) {
if ( flow.length > 0 ) flow += '\n';
flow += `\t// flow -> ${ slotName }\n`;
}
flow += `${ flowSlotData.code }\n\t`;
if ( node === mainNode && shaderStage !== 'compute' ) {
flow += '// result\n\n\t';
if ( shaderStage === 'vertex' ) {
// Vertex result goes into the position member of the varyings struct.
flow += `varyings.Vertex = ${ flowSlotData.result };`;
} else if ( shaderStage === 'fragment' ) {
if ( isOutputStruct ) {
// Custom output structs are returned directly.
stageData.returnType = outputNode.getNodeType( this );
stageData.structs += 'var<private> output : ' + stageData.returnType + ';';
flow += `return ${ flowSlotData.result };`;
} else {
// Default path: wrap the color (plus any output builtins) in a generated OutputStruct.
let structSnippet = '\t@location(0) color: vec4<f32>';
const builtins = this.getBuiltins( 'output' );
if ( builtins ) structSnippet += ',\n\t' + builtins;
stageData.returnType = 'OutputStruct';
stageData.structs += this._getWGSLStruct( 'OutputStruct', structSnippet );
stageData.structs += '\nvar<private> output : OutputStruct;';
flow += `output.color = ${ flowSlotData.result };\n\n\treturn output;`;
}
}
}
}
stageData.flow = flow;
}
// Reset the stage context before assembling the final shader strings.
this.shaderStage = null;
if ( this.material !== null ) {
this.vertexShader = this._getWGSLVertexCode( shadersData.vertex );
this.fragmentShader = this._getWGSLFragmentCode( shadersData.fragment );
} else {
this.computeShader = this._getWGSLComputeCode( shadersData.compute, ( this.object.workgroupSize || [ 64 ] ).join( ', ' ) );
}
}
/**
* Returns the native shader method name for a given generic name.
*
* @param {String} method - The method name to resolve.
* @param {String} [output=null] - An optional output.
* @return {String} The resolved WGSL method name.
*/
getMethod( method, output = null ) {
let wgslMethod;
if ( output !== null ) {
wgslMethod = this._getWGSLMethod( method + '_' + output );
}
if ( wgslMethod === undefined ) {
wgslMethod = this._getWGSLMethod( method );
}
return wgslMethod || method;
}
/**
* Returns the WGSL type of the given node data type.
*
* @param {String} type - The node data type.
* @return {String} The WGSL type.
*/
getType( type ) {
return wgslTypeLib[ type ] || type;
}
/**
* Whether the requested feature is available or not.
*
* @param {String} name - The requested feature.
* @return {Boolean} Whether the requested feature is supported or not.
*/
isAvailable( name ) {
let result = supports[ name ];
if ( result === undefined ) {
if ( name === 'float32Filterable' ) {
result = this.renderer.hasFeature( 'float32-filterable' );
} else if ( name === 'clipDistance' ) {
result = this.renderer.hasFeature( 'clip-distances' );
}
supports[ name ] = result;
}
return result;
}
/**
* Returns the native shader method name for a given generic name.
*
* @private
* @param {String} method - The method name to resolve.
* @return {String} The resolved WGSL method name.
*/
_getWGSLMethod( method ) {
if ( wgslPolyfill[ method ] !== undefined ) {
this._include( method );
}
return wgslMethods[ method ];
}
/**
* Includes the given method name into the current
* function node.
*
* @private
* @param {String} name - The method name to include.
* @return {CodeNode} The respective code node.
*/
_include( name ) {
const codeNode = wgslPolyfill[ name ];
codeNode.build( this );
if ( this.currentFunctionNode !== null ) {
this.currentFunctionNode.includes.push( codeNode );
}
return codeNode;
}
/**
 * Returns a WGSL vertex shader based on the given shader data.
 *
 * @private
 * @param {Object} shaderData - The shader data.
 * @return {String} The vertex shader.
 */
_getWGSLVertexCode( shaderData ) {

	// The template content is emitted verbatim as WGSL source, so its
	// layout is intentional — do not reformat the literal.
	return `${ this.getSignature() }
// directives
${shaderData.directives}
// structs
${shaderData.structs}
// uniforms
${shaderData.uniforms}
// varyings
${shaderData.varyings}
var<private> varyings : VaryingsStruct;
// codes
${shaderData.codes}
@vertex
fn main( ${shaderData.attributes} ) -> VaryingsStruct {
// vars
${shaderData.vars}
// flow
${shaderData.flow}
return varyings;
}
`;

}
/**
 * Returns a WGSL fragment shader based on the given shader data.
 *
 * @private
 * @param {Object} shaderData - The shader data.
 * @return {String} The fragment shader.
 */
_getWGSLFragmentCode( shaderData ) {

	// The template content is emitted verbatim as WGSL source, so its
	// layout is intentional — do not reformat the literal. The return
	// statement itself is part of `shaderData.flow` (see buildCode()).
	return `${ this.getSignature() }
// global
${ diagnostics }
// structs
${shaderData.structs}
// uniforms
${shaderData.uniforms}
// codes
${shaderData.codes}
@fragment
fn main( ${shaderData.varyings} ) -> ${shaderData.returnType} {
// vars
${shaderData.vars}
// flow
${shaderData.flow}
}
`;

}
/**
 * Returns a WGSL compute shader based on the given shader data.
 *
 * @private
 * @param {Object} shaderData - The shader data.
 * @param {String} workgroupSize - The workgroup size (comma-joined, e.g. '64' or '8, 8').
 * @return {String} The compute shader.
 */
_getWGSLComputeCode( shaderData, workgroupSize ) {

	// The template content is emitted verbatim as WGSL source, so its
	// layout is intentional — do not reformat the literal.
	// `instanceIndex` linearizes the global invocation id across workgroups;
	// NOTE(review): the flattening uses only the first workgroupSize component —
	// confirm this is intended for multi-dimensional workgroup sizes.
	return `${ this.getSignature() }
// directives
${shaderData.directives}
// system
var<private> instanceIndex : u32;
// locals
${shaderData.scopedArrays}
// structs
${shaderData.structs}
// uniforms
${shaderData.uniforms}
// codes
${shaderData.codes}
@compute @workgroup_size( ${workgroupSize} )
fn main( ${shaderData.attributes} ) {
// system
instanceIndex = globalId.x + globalId.y * numWorkgroups.x * u32(${workgroupSize}) + globalId.z * numWorkgroups.x * numWorkgroups.y * u32(${workgroupSize});
// vars
${shaderData.vars}
// flow
${shaderData.flow}
}
`;

}
/**
* Returns a WGSL struct based on the given name and variables.
*
* @private
* @param {String} name - The struct name.
* @param {String} vars - The struct variables.
* @return {String} The WGSL snippet representing a struct.
*/
_getWGSLStruct( name, vars ) {
return `
struct ${name} {
${vars}
};`;
}
/**
* Returns a WGSL struct binding.
*
* @private
* @param {String} name - The struct name.
* @param {String} vars - The struct variables.
* @param {String} access - The access.
* @param {Number} [binding=0] - The binding index.
* @param {Number} [group=0] - The group index.
* @return {String} The WGSL snippet representing a struct binding.
*/
_getWGSLStructBinding( name, vars, access, binding = 0, group = 0 ) {
const structName = name + 'Struct';
const structSnippet = this._getWGSLStruct( structName, vars );
return `${structSnippet}
@binding( ${ binding } ) @group( ${ group } )
var<${access}> ${ name } : ${ structName };`;
}
}
/**
 * A WebGPU backend utility module with common helpers.
 *
 * @private
 */
class WebGPUUtils {

	/**
	 * Constructs a new utility object.
	 *
	 * @param {WebGPUBackend} backend - The WebGPU backend.
	 */
	constructor( backend ) {

		/**
		 * A reference to the WebGPU backend.
		 *
		 * @type {WebGPUBackend}
		 */
		this.backend = backend;

	}

	/**
	 * Returns the depth/stencil GPU format for the given render context.
	 *
	 * @param {RenderContext} renderContext - The render context.
	 * @return {String} The depth/stencil GPU texture format.
	 */
	getCurrentDepthStencilFormat( renderContext ) {

		// An explicit depth texture determines the format directly.
		if ( renderContext.depthTexture !== null ) {

			return this.getTextureFormatGPU( renderContext.depthTexture );

		}

		if ( renderContext.depth && renderContext.stencil ) {

			return GPUTextureFormat.Depth24PlusStencil8;

		}

		if ( renderContext.depth ) {

			return GPUTextureFormat.Depth24Plus;

		}

		// No depth requested — callers receive undefined in that case.
		return undefined;

	}

	/**
	 * Returns the GPU format for the given texture.
	 *
	 * @param {Texture} texture - The texture.
	 * @return {String} The GPU texture format.
	 */
	getTextureFormatGPU( texture ) {

		const { format } = this.backend.get( texture );

		return format;

	}

	/**
	 * Returns an object that defines the multi-sampling state of the given texture.
	 *
	 * @param {Texture} texture - The texture.
	 * @return {Object} The multi-sampling state.
	 */
	getTextureSampleData( texture ) {

		let sampleCount;

		if ( texture.isFramebufferTexture ) {

			sampleCount = 1;

		} else if ( texture.isDepthTexture && ! texture.renderTarget ) {

			// Depth textures without an own render target inherit the sample
			// count from the active render target (or the renderer itself).
			const renderer = this.backend.renderer;
			const renderTarget = renderer.getRenderTarget();

			sampleCount = renderTarget ? renderTarget.samples : renderer.samples;

		} else if ( texture.renderTarget ) {

			sampleCount = texture.renderTarget.samples;

		}

		const samples = sampleCount || 1;

		// MSAA is resolved into a single-sampled primary texture for render-target
		// color attachments (depth and framebuffer textures are excluded).
		const isMSAA = samples > 1 && texture.renderTarget !== null && ( texture.isDepthTexture !== true && texture.isFramebufferTexture !== true );

		const primarySamples = isMSAA ? 1 : samples;

		return { samples, primarySamples, isMSAA };

	}

	/**
	 * Returns the default color attachment's GPU format of the current render context.
	 *
	 * @param {RenderContext} renderContext - The render context.
	 * @return {String} The GPU texture format of the default color attachment.
	 */
	getCurrentColorFormat( renderContext ) {

		// Without explicit textures, the canvas (default context) format applies.
		return ( renderContext.textures !== null )
			? this.getTextureFormatGPU( renderContext.textures[ 0 ] )
			: this.getPreferredCanvasFormat();

	}

	/**
	 * Returns the output color space of the current render context.
	 *
	 * @param {RenderContext} renderContext - The render context.
	 * @return {String} The output color space.
	 */
	getCurrentColorSpace( renderContext ) {

		return ( renderContext.textures !== null )
			? renderContext.textures[ 0 ].colorSpace
			: this.backend.renderer.outputColorSpace;

	}

	/**
	 * Returns GPU primitive topology for the given object and material.
	 *
	 * @param {Object3D} object - The 3D object.
	 * @param {Material} material - The material.
	 * @return {String} The GPU primitive topology.
	 */
	getPrimitiveTopology( object, material ) {

		if ( object.isPoints ) return GPUPrimitiveTopology.PointList;

		// Wireframe meshes are drawn as individual line segments.
		if ( object.isLineSegments || ( object.isMesh && material.wireframe === true ) ) return GPUPrimitiveTopology.LineList;

		if ( object.isLine ) return GPUPrimitiveTopology.LineStrip;

		if ( object.isMesh ) return GPUPrimitiveTopology.TriangleList;

	}

	/**
	 * Returns a modified sample count from the given sample count value.
	 *
	 * That is required since WebGPU does not support arbitrary sample counts.
	 *
	 * @param {Number} sampleCount - The input sample count.
	 * @return {Number} The (potentially updated) output sample count.
	 */
	getSampleCount( sampleCount ) {

		if ( sampleCount <= 1 ) return 1;

		// Round down to a power of two; 2 is not a valid WebGPU sample count,
		// so it is promoted to 4.
		const powerOfTwo = Math.pow( 2, Math.floor( Math.log2( sampleCount ) ) );

		return ( powerOfTwo === 2 ) ? 4 : powerOfTwo;

	}

	/**
	 * Returns the sample count of the given render context.
	 *
	 * @param {RenderContext} renderContext - The render context.
	 * @return {Number} The sample count.
	 */
	getSampleCountRenderContext( renderContext ) {

		const value = ( renderContext.textures !== null ) ? renderContext.sampleCount : this.backend.renderer.samples;

		return this.getSampleCount( value );

	}

	/**
	 * Returns the preferred canvas format.
	 *
	 * There is a separate method for this so it's possible to
	 * honor edge cases for specific devices.
	 *
	 * @return {String} The GPU texture format of the canvas.
	 */
	getPreferredCanvasFormat() {

		const { outputType } = this.backend.parameters;

		if ( outputType === undefined ) return navigator.gpu.getPreferredCanvasFormat();

		if ( outputType === UnsignedByteType ) return GPUTextureFormat.BGRA8Unorm;

		if ( outputType === HalfFloatType ) return GPUTextureFormat.RGBA16Float;

		throw new Error( 'Unsupported outputType' );

	}

}
// Lookup tables used by WebGPUAttributeUtils._getVertexFormat().

// Maps a typed-array constructor to its vertex format prefixes as
// [ non-normalized, normalized ] variants (e.g. Int8Array -> 'sint8' / 'snorm8').
// NOTE(review): 'snorm32' / 'unorm32' are not valid GPUVertexFormat prefixes —
// confirm that normalized 32-bit attributes can never reach this table.
const typedArraysToVertexFormatPrefix = new Map( [
	[ Int8Array, [ 'sint8', 'snorm8' ]],
	[ Uint8Array, [ 'uint8', 'unorm8' ]],
	[ Int16Array, [ 'sint16', 'snorm16' ]],
	[ Uint16Array, [ 'uint16', 'unorm16' ]],
	[ Int32Array, [ 'sint32', 'snorm32' ]],
	[ Uint32Array, [ 'uint32', 'unorm32' ]],
	[ Float32Array, [ 'float32', ]],
] );

// Attribute-class based override; presumably needed because the backing array
// type alone cannot identify half-float data — verify against Float16BufferAttribute.
const typedAttributeToVertexFormatPrefix = new Map( [
	[ Float16BufferAttribute, [ 'float16', ]],
] );

// Formats for itemSize === 1 attributes. 16-bit arrays map to 32-bit formats
// because createAttribute() widens their data (see "patch for INT16 and UINT16").
const typeArraysToVertexFormatPrefixForItemSize1 = new Map( [
	[ Int32Array, 'sint32' ],
	[ Int16Array, 'sint32' ], // patch for INT16
	[ Uint32Array, 'uint32' ],
	[ Uint16Array, 'uint32' ], // patch for UINT16
	[ Float32Array, 'float32' ]
] );
/**
 * A WebGPU backend utility module for managing shader attributes.
 *
 * @private
 */
class WebGPUAttributeUtils {

	/**
	 * Constructs a new utility object.
	 *
	 * @param {WebGPUBackend} backend - The WebGPU backend.
	 */
	constructor( backend ) {

		/**
		 * A reference to the WebGPU backend.
		 *
		 * @type {WebGPUBackend}
		 */
		this.backend = backend;

	}

	/**
	 * Creates the GPU buffer for the given buffer attribute.
	 *
	 * @param {BufferAttribute} attribute - The buffer attribute.
	 * @param {GPUBufferUsage} usage - A flag that indicates how the buffer may be used after its creation.
	 */
	createAttribute( attribute, usage ) {

		const bufferAttribute = this._getBufferAttribute( attribute );
		const backend = this.backend;
		const bufferData = backend.get( bufferAttribute );

		let buffer = bufferData.buffer;

		if ( buffer === undefined ) {

			const device = backend.device;

			let array = bufferAttribute.array;

			// patch for INT16 and UINT16: non-normalized 16-bit data is widened
			// to 32 bit (see typeArraysToVertexFormatPrefixForItemSize1).
			if ( attribute.normalized === false ) {

				if ( array.constructor === Int16Array ) {

					array = new Int32Array( array );

				} else if ( array.constructor === Uint16Array ) {

					array = new Uint32Array( array );

					if ( usage & GPUBufferUsage.INDEX ) {

						for ( let i = 0; i < array.length; i ++ ) {

							if ( array[ i ] === 0xffff ) array[ i ] = 0xffffffff; // use correct primitive restart index

						}

					}

				}

			}

			bufferAttribute.array = array;

			// Pad vec3 storage data to vec4 boundaries (vec3 arrays in WGSL use a 16 byte stride).
			if ( ( bufferAttribute.isStorageBufferAttribute || bufferAttribute.isStorageInstancedBufferAttribute ) && bufferAttribute.itemSize === 3 ) {

				array = new array.constructor( bufferAttribute.count * 4 );

				for ( let i = 0; i < bufferAttribute.count; i ++ ) {

					array.set( bufferAttribute.array.subarray( i * 3, i * 3 + 3 ), i * 4 );

				}

				// Update BufferAttribute
				bufferAttribute.itemSize = 4;
				bufferAttribute.array = array;

			}

			const size = array.byteLength + ( ( 4 - ( array.byteLength % 4 ) ) % 4 ); // ensure 4 byte alignment, see #20441

			buffer = device.createBuffer( {
				label: bufferAttribute.name,
				size: size,
				usage: usage,
				mappedAtCreation: true
			} );

			new array.constructor( buffer.getMappedRange() ).set( array );

			buffer.unmap();

			bufferData.buffer = buffer;

		}

	}

	/**
	 * Updates the GPU buffer of the given buffer attribute.
	 *
	 * Uploads either the whole array or, when update ranges are defined,
	 * only the dirty sub-ranges.
	 *
	 * @param {BufferAttribute} attribute - The buffer attribute.
	 */
	updateAttribute( attribute ) {

		const bufferAttribute = this._getBufferAttribute( attribute );

		const backend = this.backend;
		const device = backend.device;

		const buffer = backend.get( bufferAttribute ).buffer;

		const array = bufferAttribute.array;
		const isTypedArray = this._isTypedArray( array );
		const updateRanges = bufferAttribute.updateRanges;

		if ( updateRanges.length === 0 ) {

			// Not using update ranges — upload the entire array.
			device.queue.writeBuffer(
				buffer,
				0,
				array,
				0
			);

		} else {

			// For typed arrays, writeBuffer() interprets dataOffset/size in
			// elements; for raw buffers they are byte values.
			const byteOffsetFactor = isTypedArray ? 1 : array.BYTES_PER_ELEMENT;

			for ( let i = 0, l = updateRanges.length; i < l; i ++ ) {

				const range = updateRanges[ i ];
				const dataOffset = range.start * byteOffsetFactor;
				const size = range.count * byteOffsetFactor;

				// FIX: the destination offset (always in bytes) must mirror the
				// source offset — previously every range was written to byte 0,
				// corrupting partial updates with range.start > 0.
				const bufferOffset = isTypedArray ? dataOffset * array.BYTES_PER_ELEMENT : dataOffset;

				device.queue.writeBuffer(
					buffer,
					bufferOffset,
					array,
					dataOffset,
					size
				);

			}

			bufferAttribute.clearUpdateRanges();

		}

	}

	/**
	 * This method creates the vertex buffer layout data which are
	 * required when creating a render pipeline for the given render object.
	 *
	 * @param {RenderObject} renderObject - The render object.
	 * @return {Array<Object>} An array holding objects which describe the vertex buffer layout.
	 */
	createShaderVertexBuffers( renderObject ) {

		const attributes = renderObject.getAttributes();
		// Attributes sharing the same underlying buffer share one layout entry.
		const vertexBuffers = new Map();

		for ( let slot = 0; slot < attributes.length; slot ++ ) {

			const geometryAttribute = attributes[ slot ];
			const bytesPerElement = geometryAttribute.array.BYTES_PER_ELEMENT;
			const bufferAttribute = this._getBufferAttribute( geometryAttribute );

			let vertexBufferLayout = vertexBuffers.get( bufferAttribute );

			if ( vertexBufferLayout === undefined ) {

				let arrayStride, stepMode;

				if ( geometryAttribute.isInterleavedBufferAttribute === true ) {

					arrayStride = geometryAttribute.data.stride * bytesPerElement;
					stepMode = geometryAttribute.data.isInstancedInterleavedBuffer ? GPUInputStepMode.Instance : GPUInputStepMode.Vertex;

				} else {

					arrayStride = geometryAttribute.itemSize * bytesPerElement;
					stepMode = geometryAttribute.isInstancedBufferAttribute ? GPUInputStepMode.Instance : GPUInputStepMode.Vertex;

				}

				// patch for INT16 and UINT16: createAttribute() widened the data
				// to 32 bit, so the stride must reflect 4 byte elements.
				if ( geometryAttribute.normalized === false && ( geometryAttribute.array.constructor === Int16Array || geometryAttribute.array.constructor === Uint16Array ) ) {

					arrayStride = 4;

				}

				vertexBufferLayout = {
					arrayStride,
					attributes: [],
					stepMode
				};

				vertexBuffers.set( bufferAttribute, vertexBufferLayout );

			}

			const format = this._getVertexFormat( geometryAttribute );
			const offset = ( geometryAttribute.isInterleavedBufferAttribute === true ) ? geometryAttribute.offset * bytesPerElement : 0;

			vertexBufferLayout.attributes.push( {
				shaderLocation: slot,
				offset,
				format
			} );

		}

		return Array.from( vertexBuffers.values() );

	}

	/**
	 * Destroys the GPU buffer of the given buffer attribute.
	 *
	 * NOTE(review): for interleaved attributes the buffer is looked up via the
	 * underlying InterleavedBuffer but the map entry removed is the attribute
	 * itself — confirm this asymmetry is intended.
	 *
	 * @param {BufferAttribute} attribute - The buffer attribute.
	 */
	destroyAttribute( attribute ) {

		const backend = this.backend;
		const data = backend.get( this._getBufferAttribute( attribute ) );

		data.buffer.destroy();

		backend.delete( attribute );

	}

	/**
	 * This method performs a readback operation by moving buffer data from
	 * a storage buffer attribute from the GPU to the CPU.
	 *
	 * @async
	 * @param {StorageBufferAttribute} attribute - The storage buffer attribute.
	 * @return {Promise<ArrayBuffer>} A promise that resolves with the buffer data when the data are ready.
	 */
	async getArrayBufferAsync( attribute ) {

		const backend = this.backend;
		const device = backend.device;

		const data = backend.get( this._getBufferAttribute( attribute ) );

		const bufferGPU = data.buffer;
		const size = bufferGPU.size;

		// Storage buffers are not mappable; copy into a dedicated MAP_READ buffer.
		const readBufferGPU = device.createBuffer( {
			label: `${ attribute.name }_readback`,
			size,
			usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
		} );

		const cmdEncoder = device.createCommandEncoder( {
			label: `readback_encoder_${ attribute.name }`
		} );

		cmdEncoder.copyBufferToBuffer(
			bufferGPU,
			0,
			readBufferGPU,
			0,
			size
		);

		const gpuCommands = cmdEncoder.finish();
		device.queue.submit( [ gpuCommands ] );

		await readBufferGPU.mapAsync( GPUMapMode.READ );

		// Copy out of the mapped range before unmapping invalidates it.
		const arrayBuffer = readBufferGPU.getMappedRange();
		const dstBuffer = new attribute.array.constructor( arrayBuffer.slice( 0 ) );

		readBufferGPU.unmap();

		return dstBuffer.buffer;

	}

	/**
	 * Returns the vertex format of the given buffer attribute.
	 *
	 * @private
	 * @param {BufferAttribute} geometryAttribute - The buffer attribute.
	 * @return {String} The vertex format (e.g. 'float32x3').
	 */
	_getVertexFormat( geometryAttribute ) {

		const { itemSize, normalized } = geometryAttribute;
		const ArrayType = geometryAttribute.array.constructor;
		const AttributeType = geometryAttribute.constructor;

		let format;

		if ( itemSize === 1 ) {

			format = typeArraysToVertexFormatPrefixForItemSize1.get( ArrayType );

		} else {

			// The attribute class takes precedence over the array type (half floats).
			const prefixOptions = typedAttributeToVertexFormatPrefix.get( AttributeType ) || typedArraysToVertexFormatPrefix.get( ArrayType );
			const prefix = prefixOptions[ normalized ? 1 : 0 ];

			if ( prefix ) {

				// Pad the per-vertex byte size up to a 4 byte multiple and derive
				// the (possibly padded) component count from it.
				const bytesPerUnit = ArrayType.BYTES_PER_ELEMENT * itemSize;
				const paddedBytesPerUnit = Math.floor( ( bytesPerUnit + 3 ) / 4 ) * 4;
				const paddedItemSize = paddedBytesPerUnit / ArrayType.BYTES_PER_ELEMENT;

				if ( paddedItemSize % 1 ) {

					throw new Error( 'THREE.WebGPUAttributeUtils: Bad vertex format item size.' );

				}

				format = `${prefix}x${paddedItemSize}`;

			}

		}

		if ( ! format ) {

			console.error( 'THREE.WebGPUAttributeUtils: Vertex format not supported yet.' );

		}

		return format;

	}

	/**
	 * Returns `true` if the given array is a typed array.
	 *
	 * @private
	 * @param {Any} array - The array.
	 * @return {Boolean} Whether the given array is a typed array or not.
	 */
	_isTypedArray( array ) {

		return ArrayBuffer.isView( array ) && ! ( array instanceof DataView );

	}

	/**
	 * Utility method for handling interleaved buffer attributes correctly.
	 * To process them, their `InterleavedBuffer` is returned.
	 *
	 * @private
	 * @param {BufferAttribute} attribute - The attribute.
	 * @return {BufferAttribute|InterleavedBuffer}
	 */
	_getBufferAttribute( attribute ) {

		if ( attribute.isInterleavedBufferAttribute ) attribute = attribute.data;

		return attribute;

	}

}
/**
 * A WebGPU backend utility module for managing bindings.
 *
 * When reading the documentation it's helpful to keep in mind that
 * all class definitions starting with 'GPU*' are modules from the
 * WebGPU API. So for example `BindGroup` is a class from the engine
 * whereas `GPUBindGroup` is a class from WebGPU.
 *
 * @private
 */
class WebGPUBindingUtils {

	/**
	 * Constructs a new utility object.
	 *
	 * @param {WebGPUBackend} backend - The WebGPU backend.
	 */
	constructor( backend ) {

		/**
		 * A reference to the WebGPU backend.
		 *
		 * @type {WebGPUBackend}
		 */
		this.backend = backend;

		/**
		 * A cache for managing bind group layouts. Keyed by the bind group's
		 * `bindingsReference` so equal binding sets share one layout.
		 *
		 * @type {WeakMap<Array<Binding>,GPUBindGroupLayout>}
		 */
		this.bindGroupLayoutCache = new WeakMap();

	}

	/**
	 * Creates a GPU bind group layout for the given bind group.
	 *
	 * Binding slots are assigned sequentially in declaration order and must
	 * therefore match the order used in createBindGroup().
	 *
	 * @param {BindGroup} bindGroup - The bind group.
	 * @return {GPUBindGroupLayout} The GPU bind group layout.
	 */
	createBindingsLayout( bindGroup ) {

		const backend = this.backend;
		const device = backend.device;

		const entries = [];

		let index = 0;

		for ( const binding of bindGroup.bindings ) {

			const bindingGPU = {
				binding: index ++,
				visibility: binding.visibility
			};

			if ( binding.isUniformBuffer || binding.isStorageBuffer ) {

				const buffer = {}; // GPUBufferBindingLayout

				if ( binding.isStorageBuffer ) {

					// Bit 4 — presumably GPUShaderStage.COMPUTE; confirm against the
					// visibility flags set where bindings are created.
					if ( binding.visibility & 4 ) {

						// compute
						if ( binding.access === NodeAccess.READ_WRITE || binding.access === NodeAccess.WRITE_ONLY ) {

							buffer.type = GPUBufferBindingType.Storage;

						} else {

							buffer.type = GPUBufferBindingType.ReadOnlyStorage;

						}

					} else {

						// Non-compute stages only get read-only storage access.
						buffer.type = GPUBufferBindingType.ReadOnlyStorage;

					}

				}

				bindingGPU.buffer = buffer;

			} else if ( binding.isSampler ) {

				const sampler = {}; // GPUSamplerBindingLayout

				// Depth textures with a compare function require a comparison sampler.
				if ( binding.texture.isDepthTexture ) {

					if ( binding.texture.compareFunction !== null ) {

						sampler.type = 'comparison';

					}

				}

				bindingGPU.sampler = sampler;

			} else if ( binding.isSampledTexture && binding.texture.isVideoTexture ) {

				bindingGPU.externalTexture = {}; // GPUExternalTextureBindingLayout

			} else if ( binding.isSampledTexture && binding.store ) {

				const storageTexture = {}; // GPUStorageTextureBindingLayout
				storageTexture.format = this.backend.get( binding.texture ).texture.format;

				const access = binding.access;

				if ( access === NodeAccess.READ_WRITE ) {

					storageTexture.access = GPUStorageTextureAccess.ReadWrite;

				} else if ( access === NodeAccess.WRITE_ONLY ) {

					storageTexture.access = GPUStorageTextureAccess.WriteOnly;

				} else {

					storageTexture.access = GPUStorageTextureAccess.ReadOnly;

				}

				bindingGPU.storageTexture = storageTexture;

			} else if ( binding.isSampledTexture ) {

				const texture = {}; // GPUTextureBindingLayout

				const { primarySamples } = backend.utils.getTextureSampleData( binding.texture );

				if ( primarySamples > 1 ) {

					texture.multisampled = true;

					if ( ! binding.texture.isDepthTexture ) {

						texture.sampleType = GPUTextureSampleType.UnfilterableFloat;

					}

				}

				// The depth check below may override the multisampled sample type set above.
				if ( binding.texture.isDepthTexture ) {

					texture.sampleType = GPUTextureSampleType.Depth;

				} else if ( binding.texture.isDataTexture || binding.texture.isDataArrayTexture || binding.texture.isData3DTexture ) {

					const type = binding.texture.type;

					if ( type === IntType ) {

						texture.sampleType = GPUTextureSampleType.SInt;

					} else if ( type === UnsignedIntType ) {

						texture.sampleType = GPUTextureSampleType.UInt;

					} else if ( type === FloatType ) {

						// Filtering float32 textures is an optional WebGPU feature.
						if ( this.backend.hasFeature( 'float32-filterable' ) ) {

							texture.sampleType = GPUTextureSampleType.Float;

						} else {

							texture.sampleType = GPUTextureSampleType.UnfilterableFloat;

						}

					}

				}

				if ( binding.isSampledCubeTexture ) {

					texture.viewDimension = GPUTextureViewDimension.Cube;

				} else if ( binding.texture.isDataArrayTexture || binding.texture.isCompressedArrayTexture ) {

					texture.viewDimension = GPUTextureViewDimension.TwoDArray;

				} else if ( binding.isSampledTexture3D ) {

					texture.viewDimension = GPUTextureViewDimension.ThreeD;

				}

				bindingGPU.texture = texture;

			} else {

				console.error( `WebGPUBindingUtils: Unsupported binding "${ binding }".` );

			}

			entries.push( bindingGPU );

		}

		return device.createBindGroupLayout( { entries } );

	}

	/**
	 * Creates bindings from the given bind group definition.
	 *
	 * The layout is cached per `bindingsReference`; bind groups themselves are
	 * additionally cached per `cacheIndex`/`version` when `cacheIndex > 0`.
	 *
	 * @param {BindGroup} bindGroup - The bind group.
	 * @param {Array<BindGroup>} bindings - Array of bind groups.
	 * @param {Number} cacheIndex - The cache index.
	 * @param {Number} version - The version.
	 */
	createBindings( bindGroup, bindings, cacheIndex, version = 0 ) {

		const { backend, bindGroupLayoutCache } = this;
		const bindingsData = backend.get( bindGroup );

		// setup (static) binding layout and (dynamic) binding group
		let bindLayoutGPU = bindGroupLayoutCache.get( bindGroup.bindingsReference );

		if ( bindLayoutGPU === undefined ) {

			bindLayoutGPU = this.createBindingsLayout( bindGroup );
			bindGroupLayoutCache.set( bindGroup.bindingsReference, bindLayoutGPU );

		}

		let bindGroupGPU;

		if ( cacheIndex > 0 ) {

			if ( bindingsData.groups === undefined ) {

				bindingsData.groups = [];
				bindingsData.versions = [];

			}

			// Reuse the cached bind group only when the version still matches.
			if ( bindingsData.versions[ cacheIndex ] === version ) {

				bindGroupGPU = bindingsData.groups[ cacheIndex ];

			}

		}

		if ( bindGroupGPU === undefined ) {

			bindGroupGPU = this.createBindGroup( bindGroup, bindLayoutGPU );

			if ( cacheIndex > 0 ) {

				bindingsData.groups[ cacheIndex ] = bindGroupGPU;
				bindingsData.versions[ cacheIndex ] = version;

			}

		}

		bindingsData.group = bindGroupGPU;
		bindingsData.layout = bindLayoutGPU;

	}

	/**
	 * Updates a buffer binding by uploading its CPU-side buffer to the GPU.
	 *
	 * @param {Buffer} binding - The buffer binding to update.
	 */
	updateBinding( binding ) {

		const backend = this.backend;
		const device = backend.device;

		const buffer = binding.buffer;
		const bufferGPU = backend.get( binding ).buffer;

		device.queue.writeBuffer( bufferGPU, 0, buffer, 0 );

	}

	/**
	 * Creates a GPU bind group for the camera index.
	 *
	 * @param {Uint32Array} data - The index data.
	 * @param {GPUBindGroupLayout} layout - The GPU bind group layout.
	 * @return {GPUBindGroup} The GPU bind group.
	 */
	createBindGroupIndex( data, layout ) {

		const backend = this.backend;
		const device = backend.device;

		const usage = GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST;
		const index = data[ 0 ];

		const buffer = device.createBuffer( {
			label: 'bindingCameraIndex_' + index,
			size: 16, // uint(4) * 4
			usage: usage
		} );

		device.queue.writeBuffer( buffer, 0, data, 0 );

		const entries = [ { binding: 0, resource: { buffer } } ];

		return device.createBindGroup( {
			label: 'bindGroupCameraIndex_' + index,
			layout,
			entries
		} );

	}

	/**
	 * Creates a GPU bind group for the given bind group and GPU layout.
	 *
	 * Entry order mirrors createBindingsLayout(), so both must iterate
	 * `bindGroup.bindings` identically.
	 *
	 * @param {BindGroup} bindGroup - The bind group.
	 * @param {GPUBindGroupLayout} layoutGPU - The GPU bind group layout.
	 * @return {GPUBindGroup} The GPU bind group.
	 */
	createBindGroup( bindGroup, layoutGPU ) {

		const backend = this.backend;
		const device = backend.device;

		let bindingPoint = 0;
		const entriesGPU = [];

		for ( const binding of bindGroup.bindings ) {

			if ( binding.isUniformBuffer ) {

				const bindingData = backend.get( binding );

				// Lazily create the GPU-side uniform buffer on first use.
				if ( bindingData.buffer === undefined ) {

					const byteLength = binding.byteLength;

					const usage = GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST;

					const bufferGPU = device.createBuffer( {
						label: 'bindingBuffer_' + binding.name,
						size: byteLength,
						usage: usage
					} );

					bindingData.buffer = bufferGPU;

				}

				entriesGPU.push( { binding: bindingPoint, resource: { buffer: bindingData.buffer } } );

			} else if ( binding.isStorageBuffer ) {

				const bindingData = backend.get( binding );

				// Storage buffers reuse the GPU buffer created for the attribute.
				if ( bindingData.buffer === undefined ) {

					const attribute = binding.attribute;
					//const usage = GPUBufferUsage.STORAGE | GPUBufferUsage.VERTEX | /*GPUBufferUsage.COPY_SRC |*/ GPUBufferUsage.COPY_DST;

					//backend.attributeUtils.createAttribute( attribute, usage ); // @TODO: Move it to universal renderer

					bindingData.buffer = backend.get( attribute ).buffer;

				}

				entriesGPU.push( { binding: bindingPoint, resource: { buffer: bindingData.buffer } } );

			} else if ( binding.isSampler ) {

				const textureGPU = backend.get( binding.texture );

				entriesGPU.push( { binding: bindingPoint, resource: textureGPU.sampler } );

			} else if ( binding.isSampledTexture ) {

				const textureData = backend.get( binding.texture );

				let resourceGPU;

				if ( textureData.externalTexture !== undefined ) {

					// External (video) textures are re-imported on every bind group creation.
					resourceGPU = device.importExternalTexture( { source: textureData.externalTexture } );

				} else {

					// Storage bindings view a single mip level; sampled bindings view all.
					const mipLevelCount = binding.store ? 1 : textureData.texture.mipLevelCount;
					// Views are cached on the texture data, keyed by size and mip count.
					const propertyName = `view-${ textureData.texture.width }-${ textureData.texture.height }-${ mipLevelCount }`;

					resourceGPU = textureData[ propertyName ];

					if ( resourceGPU === undefined ) {

						const aspectGPU = GPUTextureAspect.All;

						let dimensionViewGPU;

						if ( binding.isSampledCubeTexture ) {

							dimensionViewGPU = GPUTextureViewDimension.Cube;

						} else if ( binding.isSampledTexture3D ) {

							dimensionViewGPU = GPUTextureViewDimension.ThreeD;

						} else if ( binding.texture.isDataArrayTexture || binding.texture.isCompressedArrayTexture ) {

							dimensionViewGPU = GPUTextureViewDimension.TwoDArray;

						} else {

							dimensionViewGPU = GPUTextureViewDimension.TwoD;

						}

						resourceGPU = textureData[ propertyName ] = textureData.texture.createView( { aspect: aspectGPU, dimension: dimensionViewGPU, mipLevelCount } );

					}

				}

				entriesGPU.push( { binding: bindingPoint, resource: resourceGPU } );

			}

			bindingPoint ++;

		}

		return device.createBindGroup( {
			label: 'bindGroup_' + bindGroup.name,
			layout: layoutGPU,
			entries: entriesGPU
		} );

	}

}
/**
* A WebGPU backend utility module for managing pipelines.
*
* @private
*/
class WebGPUPipelineUtils {
/**
 * Constructs a new utility object.
 *
 * @param {WebGPUBackend} backend - The WebGPU backend.
 */
constructor( backend ) {

	/**
	 * A reference to the WebGPU backend.
	 *
	 * @type {WebGPUBackend}
	 */
	this.backend = backend;

}
/**
* Returns the sample count derived from the given render context.
*
* @private
* @param {RenderContext} renderContext - The render context.
* @return {Number} The sample count.
*/
_getSampleCount( renderContext ) {
return this.backend.utils.getSampleCountRenderContext( renderContext );
}
/**
* Creates a render pipeline for the given render object.
*
* The resulting GPURenderPipeline is stored on the backend data of
* `renderObject.pipeline`. When `promises` is an array, the pipeline is
* compiled asynchronously via `createRenderPipelineAsync()` and a pending
* promise is appended to it; when `promises` is `null`, the pipeline is
* created synchronously.
*
* @param {RenderObject} renderObject - The render object.
* @param {?Array<Promise>} promises - An array of compilation promises which are used in `compileAsync()`. Pass `null` for synchronous creation.
*/
createRenderPipeline( renderObject, promises ) {
const { object, material, geometry, pipeline } = renderObject;
const { vertexProgram, fragmentProgram } = pipeline;
const backend = this.backend;
const device = backend.device;
const utils = backend.utils;
const pipelineData = backend.get( pipeline );
// bind group layouts: reuse the layouts already created for the object's bind groups
const bindGroupLayouts = [];
for ( const bindGroup of renderObject.getBindings() ) {
const bindingsData = backend.get( bindGroup );
bindGroupLayouts.push( bindingsData.layout );
}
// vertex buffers
const vertexBuffers = backend.attributeUtils.createShaderVertexBuffers( renderObject );
// blending is only configured for transparent materials with an active blend mode;
// otherwise `blending` stays undefined and the color targets are opaque
let blending;
if ( material.transparent === true && material.blending !== NoBlending ) {
blending = this._getBlending( material );
}
// stencil front-face state (back face is not configurable, see below)
let stencilFront = {};
if ( material.stencilWrite === true ) {
stencilFront = {
compare: this._getStencilCompare( material ),
failOp: this._getStencilOperation( material.stencilFail ),
depthFailOp: this._getStencilOperation( material.stencilZFail ),
passOp: this._getStencilOperation( material.stencilZPass )
};
}
const colorWriteMask = this._getColorWriteMask( material );
const targets = [];
if ( renderObject.context.textures !== null ) {
// render target path: one color target per attached texture (MRT)
const textures = renderObject.context.textures;
for ( let i = 0; i < textures.length; i ++ ) {
const colorFormat = utils.getTextureFormatGPU( textures[ i ] );
targets.push( {
format: colorFormat,
blend: blending,
writeMask: colorWriteMask
} );
}
} else {
// canvas path: a single color target in the current canvas format
const colorFormat = utils.getCurrentColorFormat( renderObject.context );
targets.push( {
format: colorFormat,
blend: blending,
writeMask: colorWriteMask
} );
}
const vertexModule = backend.get( vertexProgram ).module;
const fragmentModule = backend.get( fragmentProgram ).module;
const primitiveState = this._getPrimitiveState( object, geometry, material );
const depthCompare = this._getDepthCompare( material );
const depthStencilFormat = utils.getCurrentDepthStencilFormat( renderObject.context );
const sampleCount = this._getSampleCount( renderObject.context );
const pipelineDescriptor = {
label: `renderPipeline_${ material.name || material.type }_${ material.id }`,
vertex: Object.assign( {}, vertexModule, { buffers: vertexBuffers } ),
fragment: Object.assign( {}, fragmentModule, { targets } ),
primitive: primitiveState,
multisample: {
count: sampleCount,
// alpha-to-coverage only has an effect with MSAA (sampleCount > 1)
alphaToCoverageEnabled: material.alphaToCoverage && sampleCount > 1
},
layout: device.createPipelineLayout( {
bindGroupLayouts
} )
};
// depth/stencil state is only attached when the render context actually
// carries a depth and/or stencil buffer
const depthStencil = {};
const renderDepth = renderObject.context.depth;
const renderStencil = renderObject.context.stencil;
if ( renderDepth === true || renderStencil === true ) {
if ( renderDepth === true ) {
depthStencil.format = depthStencilFormat;
depthStencil.depthWriteEnabled = material.depthWrite;
depthStencil.depthCompare = depthCompare;
}
if ( renderStencil === true ) {
depthStencil.stencilFront = stencilFront;
depthStencil.stencilBack = {}; // three.js does not provide an API to configure the back function (gl.stencilFuncSeparate() was never used)
depthStencil.stencilReadMask = material.stencilFuncMask;
depthStencil.stencilWriteMask = material.stencilWriteMask;
}
pipelineDescriptor.depthStencil = depthStencil;
}
if ( promises === null ) {
// synchronous path: compile immediately
pipelineData.pipeline = device.createRenderPipeline( pipelineDescriptor );
} else {
// asynchronous path: collect a promise so `compileAsync()` can await all pipelines
const p = new Promise( ( resolve /*, reject*/ ) => {
device.createRenderPipelineAsync( pipelineDescriptor ).then( pipeline => {
pipelineData.pipeline = pipeline;
resolve();
} );
} );
promises.push( p );
}
}
/**
* Creates GPU render bundle encoder for the given render context.
*
* @param {RenderContext} renderContext - The render context.
* @return {GPURenderBundleEncoder} The GPU render bundle encoder.
*/
createBundleEncoder( renderContext ) {
const backend = this.backend;
const { utils, device } = backend;
const depthStencilFormat = utils.getCurrentDepthStencilFormat( renderContext );
const colorFormat = utils.getCurrentColorFormat( renderContext );
const sampleCount = this._getSampleCount( renderContext );
const descriptor = {
label: 'renderBundleEncoder',
colorFormats: [ colorFormat ],
depthStencilFormat,
sampleCount
};
return device.createRenderBundleEncoder( descriptor );
}
/**
* Creates a compute pipeline for the given compute node.
*
* @param {ComputePipeline} pipeline - The compute pipeline.
* @param {Array<BindGroup>} bindings - The bindings.
*/
createComputePipeline( pipeline, bindings ) {
const backend = this.backend;
const device = backend.device;
const computeProgram = backend.get( pipeline.computeProgram ).module;
const pipelineGPU = backend.get( pipeline );
// bind group layouts
const bindGroupLayouts = [];
for ( const bindingsGroup of bindings ) {
const bindingsData = backend.get( bindingsGroup );
bindGroupLayouts.push( bindingsData.layout );
}
pipelineGPU.pipeline = device.createComputePipeline( {
compute: computeProgram,
layout: device.createPipelineLayout( {
bindGroupLayouts
} )
} );
}
/**
* Returns the blending state as a descriptor object required
* for the pipeline creation.
*
* The returned object holds `color` and `alpha` blend components as expected
* by the `blend` field of a GPU color target state.
*
* @private
* @param {Material} material - The material.
* @return {Object} The blending state. `undefined` (plus a logged error) for unknown blending modes.
*/
_getBlending( material ) {
let color, alpha;
const blending = material.blending;
const blendSrc = material.blendSrc;
const blendDst = material.blendDst;
const blendEquation = material.blendEquation;
if ( blending === CustomBlending ) {
// custom blending: take factors/equations straight from the material;
// the alpha channel falls back to the RGB settings when not explicitly set
const blendSrcAlpha = material.blendSrcAlpha !== null ? material.blendSrcAlpha : blendSrc;
const blendDstAlpha = material.blendDstAlpha !== null ? material.blendDstAlpha : blendDst;
const blendEquationAlpha = material.blendEquationAlpha !== null ? material.blendEquationAlpha : blendEquation;
color = {
srcFactor: this._getBlendFactor( blendSrc ),
dstFactor: this._getBlendFactor( blendDst ),
operation: this._getBlendOperation( blendEquation )
};
alpha = {
srcFactor: this._getBlendFactor( blendSrcAlpha ),
dstFactor: this._getBlendFactor( blendDstAlpha ),
operation: this._getBlendOperation( blendEquationAlpha )
};
} else {
// predefined blending modes: the factor tables differ depending on whether
// the material's colors are premultiplied by alpha
const premultipliedAlpha = material.premultipliedAlpha;
// helper: assigns color/alpha components, always using additive blend operation
const setBlend = ( srcRGB, dstRGB, srcAlpha, dstAlpha ) => {
color = {
srcFactor: srcRGB,
dstFactor: dstRGB,
operation: GPUBlendOperation.Add
};
alpha = {
srcFactor: srcAlpha,
dstFactor: dstAlpha,
operation: GPUBlendOperation.Add
};
};
if ( premultipliedAlpha ) {
switch ( blending ) {
case NormalBlending:
setBlend( GPUBlendFactor.One, GPUBlendFactor.OneMinusSrcAlpha, GPUBlendFactor.One, GPUBlendFactor.OneMinusSrcAlpha );
break;
case AdditiveBlending:
setBlend( GPUBlendFactor.One, GPUBlendFactor.One, GPUBlendFactor.One, GPUBlendFactor.One );
break;
case SubtractiveBlending:
setBlend( GPUBlendFactor.Zero, GPUBlendFactor.OneMinusSrc, GPUBlendFactor.Zero, GPUBlendFactor.One );
break;
case MultiplyBlending:
setBlend( GPUBlendFactor.Zero, GPUBlendFactor.Src, GPUBlendFactor.Zero, GPUBlendFactor.SrcAlpha );
break;
}
} else {
switch ( blending ) {
case NormalBlending:
setBlend( GPUBlendFactor.SrcAlpha, GPUBlendFactor.OneMinusSrcAlpha, GPUBlendFactor.One, GPUBlendFactor.OneMinusSrcAlpha );
break;
case AdditiveBlending:
setBlend( GPUBlendFactor.SrcAlpha, GPUBlendFactor.One, GPUBlendFactor.SrcAlpha, GPUBlendFactor.One );
break;
case SubtractiveBlending:
setBlend( GPUBlendFactor.Zero, GPUBlendFactor.OneMinusSrc, GPUBlendFactor.Zero, GPUBlendFactor.One );
break;
case MultiplyBlending:
setBlend( GPUBlendFactor.Zero, GPUBlendFactor.Src, GPUBlendFactor.Zero, GPUBlendFactor.Src );
break;
}
}
}
// both components must have been assigned; otherwise the blending mode was unknown
if ( color !== undefined && alpha !== undefined ) {
return { color, alpha };
} else {
console.error( 'THREE.WebGPURenderer: Invalid blending: ', blending );
}
}
/**
* Returns the GPU blend factor which is required for the pipeline creation.
*
* @private
* @param {Number} blend - The blend factor as a three.js constant.
* @return {String} The GPU blend factor.
*/
_getBlendFactor( blend ) {
let blendFactor;
switch ( blend ) {
case ZeroFactor:
blendFactor = GPUBlendFactor.Zero;
break;
case OneFactor:
blendFactor = GPUBlendFactor.One;
break;
case SrcColorFactor:
blendFactor = GPUBlendFactor.Src;
break;
case OneMinusSrcColorFactor:
blendFactor = GPUBlendFactor.OneMinusSrc;
break;
case SrcAlphaFactor:
blendFactor = GPUBlendFactor.SrcAlpha;
break;
case OneMinusSrcAlphaFactor:
blendFactor = GPUBlendFactor.OneMinusSrcAlpha;
break;
case DstColorFactor:
blendFactor = GPUBlendFactor.Dst;
break;
case OneMinusDstColorFactor:
blendFactor = GPUBlendFactor.OneMinusDstColor;
break;
case DstAlphaFactor:
blendFactor = GPUBlendFactor.DstAlpha;
break;
case OneMinusDstAlphaFactor:
blendFactor = GPUBlendFactor.OneMinusDstAlpha;
break;
case SrcAlphaSaturateFactor:
blendFactor = GPUBlendFactor.SrcAlphaSaturated;
break;
case BlendColorFactor:
blendFactor = GPUBlendFactor.Constant;
break;
case OneMinusBlendColorFactor:
blendFactor = GPUBlendFactor.OneMinusConstant;
break;
default:
console.error( 'THREE.WebGPURenderer: Blend factor not supported.', blend );
}
return blendFactor;
}
/**
* Returns the GPU stencil compare function which is required for the pipeline creation.
*
* @private
* @param {Material} material - The material.
* @return {String} The GPU stencil compare function.
*/
_getStencilCompare( material ) {
let stencilCompare;
const stencilFunc = material.stencilFunc;
switch ( stencilFunc ) {
case NeverStencilFunc:
stencilCompare = GPUCompareFunction.Never;
break;
case AlwaysStencilFunc:
stencilCompare = GPUCompareFunction.Always;
break;
case LessStencilFunc:
stencilCompare = GPUCompareFunction.Less;
break;
case LessEqualStencilFunc:
stencilCompare = GPUCompareFunction.LessEqual;
break;
case EqualStencilFunc:
stencilCompare = GPUCompareFunction.Equal;
break;
case GreaterEqualStencilFunc:
stencilCompare = GPUCompareFunction.GreaterEqual;
break;
case GreaterStencilFunc:
stencilCompare = GPUCompareFunction.Greater;
break;
case NotEqualStencilFunc:
stencilCompare = GPUCompareFunction.NotEqual;
break;
default:
console.error( 'THREE.WebGPURenderer: Invalid stencil function.', stencilFunc );
}
return stencilCompare;
}
/**
* Returns the GPU stencil operation which is required for the pipeline creation.
*
* @private
* @param {Number} op - A three.js constant defining the stencil operation.
* @return {String} The GPU stencil operation.
*/
_getStencilOperation( op ) {
let stencilOperation;
switch ( op ) {
case KeepStencilOp:
stencilOperation = GPUStencilOperation.Keep;
break;
case ZeroStencilOp:
stencilOperation = GPUStencilOperation.Zero;
break;
case ReplaceStencilOp:
stencilOperation = GPUStencilOperation.Replace;
break;
case InvertStencilOp:
stencilOperation = GPUStencilOperation.Invert;
break;
case IncrementStencilOp:
stencilOperation = GPUStencilOperation.IncrementClamp;
break;
case DecrementStencilOp:
stencilOperation = GPUStencilOperation.DecrementClamp;
break;
case IncrementWrapStencilOp:
stencilOperation = GPUStencilOperation.IncrementWrap;
break;
case DecrementWrapStencilOp:
stencilOperation = GPUStencilOperation.DecrementWrap;
break;
default:
console.error( 'THREE.WebGPURenderer: Invalid stencil operation.', stencilOperation );
}
return stencilOperation;
}
/**
* Returns the GPU blend operation which is required for the pipeline creation.
*
* @private
* @param {Number} blendEquation - A three.js constant defining the blend equation.
* @return {String} The GPU blend operation.
*/
_getBlendOperation( blendEquation ) {
let blendOperation;
switch ( blendEquation ) {
case AddEquation:
blendOperation = GPUBlendOperation.Add;
break;
case SubtractEquation:
blendOperation = GPUBlendOperation.Subtract;
break;
case ReverseSubtractEquation:
blendOperation = GPUBlendOperation.ReverseSubtract;
break;
case MinEquation:
blendOperation = GPUBlendOperation.Min;
break;
case MaxEquation:
blendOperation = GPUBlendOperation.Max;
break;
default:
console.error( 'THREE.WebGPUPipelineUtils: Blend equation not supported.', blendEquation );
}
return blendOperation;
}
/**
* Returns the primitive state as a descriptor object required
* for the pipeline creation.
*
* @private
* @param {Object3D} object - The 3D object.
* @param {BufferGeometry} geometry - The geometry.
* @param {Material} material - The material.
* @return {Object} The primitive state.
*/
_getPrimitiveState( object, geometry, material ) {
const descriptor = {};
const utils = this.backend.utils;
descriptor.topology = utils.getPrimitiveTopology( object, material );
if ( geometry.index !== null && object.isLine === true && object.isLineSegments !== true ) {
descriptor.stripIndexFormat = ( geometry.index.array instanceof Uint16Array ) ? GPUIndexFormat.Uint16 : GPUIndexFormat.Uint32;
}
switch ( material.side ) {
case FrontSide:
descriptor.frontFace = GPUFrontFace.CCW;
descriptor.cullMode = GPUCullMode.Back;
break;
case BackSide:
descriptor.frontFace = GPUFrontFace.CCW;
descriptor.cullMode = GPUCullMode.Front;
break;
case DoubleSide:
descriptor.frontFace = GPUFrontFace.CCW;
descriptor.cullMode = GPUCullMode.None;
break;
default:
console.error( 'THREE.WebGPUPipelineUtils: Unknown material.side value.', material.side );
break;
}
return descriptor;
}
/**
* Returns the GPU color write mask which is required for the pipeline creation.
*
* @private
* @param {Material} material - The material.
* @return {String} The GPU color write mask.
*/
_getColorWriteMask( material ) {
return ( material.colorWrite === true ) ? GPUColorWriteFlags.All : GPUColorWriteFlags.None;
}
/**
* Returns the GPU depth compare function which is required for the pipeline creation.
*
* @private
* @param {Material} material - The material.
* @return {String} The GPU depth compare function.
*/
_getDepthCompare( material ) {
let depthCompare;
if ( material.depthTest === false ) {
depthCompare = GPUCompareFunction.Always;
} else {
const depthFunc = material.depthFunc;
switch ( depthFunc ) {
case NeverDepth:
depthCompare = GPUCompareFunction.Never;
break;
case AlwaysDepth:
depthCompare = GPUCompareFunction.Always;
break;
case LessDepth:
depthCompare = GPUCompareFunction.Less;
break;
case LessEqualDepth:
depthCompare = GPUCompareFunction.LessEqual;
break;
case EqualDepth:
depthCompare = GPUCompareFunction.Equal;
break;
case GreaterEqualDepth:
depthCompare = GPUCompareFunction.GreaterEqual;
break;
case GreaterDepth:
depthCompare = GPUCompareFunction.Greater;
break;
case NotEqualDepth:
depthCompare = GPUCompareFunction.NotEqual;
break;
default:
console.error( 'THREE.WebGPUPipelineUtils: Invalid depth function.', depthFunc );
}
}
return depthCompare;
}
}
/**
* Manages a pool of WebGPU timestamp queries for performance measurement.
* Extends the base TimestampQueryPool to provide WebGPU-specific implementation.
* @extends TimestampQueryPool
*/
class WebGPUTimestampQueryPool extends TimestampQueryPool {
/**
* Creates a new WebGPU timestamp query pool.
* @param {GPUDevice} device - The WebGPU device to create queries on.
* @param {string} type - The type identifier for this query pool.
* @param {number} [maxQueries=2048] - Maximum number of queries this pool can hold.
*/
constructor( device, type, maxQueries = 2048 ) {
super( maxQueries );
this.device = device;
this.type = type;
this.querySet = this.device.createQuerySet( {
type: 'timestamp',
count: this.maxQueries,
label: `queryset_global_timestamp_${type}`
} );
// each timestamp is a 64-bit value, i.e. 8 bytes per query
const bufferSize = this.maxQueries * 8;
// GPU-side buffer the query set is resolved into
this.resolveBuffer = this.device.createBuffer( {
label: `buffer_timestamp_resolve_${type}`,
size: bufferSize,
usage: GPUBufferUsage.QUERY_RESOLVE | GPUBufferUsage.COPY_SRC
} );
// mappable buffer used to read the resolved timestamps back on the CPU
this.resultBuffer = this.device.createBuffer( {
label: `buffer_timestamp_result_${type}`,
size: bufferSize,
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
} );
}
/**
* Allocates a pair of queries (begin/end timestamp) for a given render context.
* @param {Object} renderContext - The render context to allocate queries for.
* @returns {?number} The base offset for the allocated queries, or null if allocation failed.
*/
allocateQueriesForContext( renderContext ) {
if ( ! this.trackTimestamp || this.isDisposed ) return null;
// two queries are consumed per context: one for the start and one for the end timestamp
if ( this.currentQueryIndex + 2 > this.maxQueries ) {
warnOnce( `WebGPUTimestampQueryPool [${ this.type }]: Maximum number of queries exceeded, when using trackTimestamp it is necessary to resolves the queries via renderer.resolveTimestampsAsync( THREE.TimestampQuery.${ this.type.toUpperCase() } ).` );
return null;
}
const baseOffset = this.currentQueryIndex;
this.currentQueryIndex += 2;
this.queryOffsets.set( renderContext.id, baseOffset );
return baseOffset;
}
/**
* Asynchronously resolves all pending queries and returns the total duration.
* If there's already a pending resolve operation, returns that promise instead.
* @returns {Promise<number>} The total duration in milliseconds, or the last valid value if resolution fails.
*/
async resolveQueriesAsync() {
if ( ! this.trackTimestamp || this.currentQueryIndex === 0 || this.isDisposed ) {
return this.lastValue;
}
// coalesce concurrent callers onto the same in-flight resolve
if ( this.pendingResolve ) {
return this.pendingResolve;
}
this.pendingResolve = this._resolveQueries();
try {
const result = await this.pendingResolve;
return result;
} finally {
// always clear the in-flight marker, even if the resolve rejected
this.pendingResolve = null;
}
}
/**
* Internal method to resolve queries and calculate total duration.
* @private
* @returns {Promise<number>} The total duration in milliseconds.
*/
async _resolveQueries() {
if ( this.isDisposed ) {
return this.lastValue;
}
try {
// bail out if the result buffer is still mapped (or being mapped) by a previous resolve
if ( this.resultBuffer.mapState !== 'unmapped' ) {
return this.lastValue;
}
// snapshot the per-context offsets before resetting the pool state
const currentOffsets = new Map( this.queryOffsets );
const queryCount = this.currentQueryIndex;
const bytesUsed = queryCount * 8;
// Reset state before GPU work
this.currentQueryIndex = 0;
this.queryOffsets.clear();
// resolve the query set into the GPU buffer, then copy into the mappable buffer
const commandEncoder = this.device.createCommandEncoder();
commandEncoder.resolveQuerySet(
this.querySet,
0,
queryCount,
this.resolveBuffer,
0
);
commandEncoder.copyBufferToBuffer(
this.resolveBuffer,
0,
this.resultBuffer,
0,
bytesUsed
);
const commandBuffer = commandEncoder.finish();
this.device.queue.submit( [ commandBuffer ] );
// re-check: another caller may have started mapping in the meantime
if ( this.resultBuffer.mapState !== 'unmapped' ) {
return this.lastValue;
}
// Create and track the mapping operation
await this.resultBuffer.mapAsync( GPUMapMode.READ, 0, bytesUsed );
// the pool may have been disposed while awaiting the map
if ( this.isDisposed ) {
if ( this.resultBuffer.mapState === 'mapped' ) {
this.resultBuffer.unmap();
}
return this.lastValue;
}
const times = new BigUint64Array( this.resultBuffer.getMappedRange( 0, bytesUsed ) );
let totalDuration = 0;
// sum up (end - start) per context; timestamps are nanoseconds, /1e6 converts to ms
for ( const [ , baseOffset ] of currentOffsets ) {
const startTime = times[ baseOffset ];
const endTime = times[ baseOffset + 1 ];
const duration = Number( endTime - startTime ) / 1e6;
totalDuration += duration;
}
this.resultBuffer.unmap();
this.lastValue = totalDuration;
return totalDuration;
} catch ( error ) {
console.error( 'Error resolving queries:', error );
// make sure the buffer does not stay mapped after a failure
if ( this.resultBuffer.mapState === 'mapped' ) {
this.resultBuffer.unmap();
}
return this.lastValue;
}
}
/**
* Disposes the pool: waits for a pending resolve operation, unmaps the result
* buffer and destroys all GPU resources. Safe to call more than once.
* @returns {Promise<void>}
*/
async dispose() {
if ( this.isDisposed ) {
return;
}
this.isDisposed = true;
// Wait for pending resolve operation
if ( this.pendingResolve ) {
try {
await this.pendingResolve;
} catch ( error ) {
console.error( 'Error waiting for pending resolve:', error );
}
}
// Ensure buffer is unmapped before destroying
if ( this.resultBuffer && this.resultBuffer.mapState === 'mapped' ) {
try {
this.resultBuffer.unmap();
} catch ( error ) {
console.error( 'Error unmapping buffer:', error );
}
}
// Destroy resources
if ( this.querySet ) {
this.querySet.destroy();
this.querySet = null;
}
if ( this.resolveBuffer ) {
this.resolveBuffer.destroy();
this.resolveBuffer = null;
}
if ( this.resultBuffer ) {
this.resultBuffer.destroy();
this.resultBuffer = null;
}
this.queryOffsets.clear();
this.pendingResolve = null;
}
}
/*// debugger tools
import 'https://greggman.github.io/webgpu-avoid-redundant-state-setting/webgpu-check-redundant-state-setting.js';
//*/
/**
* A backend implementation targeting WebGPU.
*
* @private
* @augments Backend
*/
class WebGPUBackend extends Backend {
/**
* Constructs a new WebGPU backend.
*
* @param {Object} parameters - The configuration parameter.
* @param {Boolean} [parameters.logarithmicDepthBuffer=false] - Whether logarithmic depth buffer is enabled or not.
* @param {Boolean} [parameters.alpha=true] - Whether the default framebuffer (which represents the final contents of the canvas) should be transparent or opaque.
* @param {Boolean} [parameters.depth=true] - Whether the default framebuffer should have a depth buffer or not.
* @param {Boolean} [parameters.stencil=false] - Whether the default framebuffer should have a stencil buffer or not.
* @param {Boolean} [parameters.antialias=false] - Whether MSAA as the default anti-aliasing should be enabled or not.
* @param {Number} [parameters.samples=0] - When `antialias` is `true`, `4` samples are used by default. Set this parameter to any other integer value than 0 to overwrite the default.
* @param {Boolean} [parameters.forceWebGL=false] - If set to `true`, the renderer uses a WebGL 2 backend no matter if WebGPU is supported or not.
* @param {Boolean} [parameters.trackTimestamp=false] - Whether to track timestamps with a Timestamp Query API or not.
* @param {String} [parameters.powerPreference=undefined] - The power preference.
* @param {Object} [parameters.requiredLimits=undefined] - Specifies the limits that are required by the device request. The request will fail if the adapter cannot provide these limits.
* @param {GPUDevice} [parameters.device=undefined] - If there is an existing GPU device on app level, it can be passed to the renderer as a parameter.
* @param {Number} [parameters.outputType=undefined] - Texture type for output to canvas. By default, device's preferred format is used; other formats may incur overhead.
*/
constructor( parameters = {} ) {
super( parameters );
/**
* This flag can be used for type testing.
*
* @type {Boolean}
* @readonly
* @default true
*/
this.isWebGPUBackend = true;
// some parameters require default values other than "undefined"
this.parameters.alpha = ( parameters.alpha === undefined ) ? true : parameters.alpha;
this.parameters.requiredLimits = ( parameters.requiredLimits === undefined ) ? {} : parameters.requiredLimits;
/**
* Whether to track timestamps with a Timestamp Query API or not.
*
* @type {Boolean}
* @default false
*/
this.trackTimestamp = ( parameters.trackTimestamp === true );
/**
* A reference to the device.
*
* @type {GPUDevice?}
* @default null
*/
this.device = null;
/**
* A reference to the context.
*
* @type {GPUCanvasContext?}
* @default null
*/
this.context = null;
/**
* A reference to the color attachment of the default framebuffer.
*
* @type {GPUTexture?}
* @default null
*/
this.colorBuffer = null;
/**
* A reference to the default render pass descriptor.
*
* @type {Object?}
* @default null
*/
this.defaultRenderPassdescriptor = null;
/**
* A reference to a backend module holding common utility functions.
*
* @type {WebGPUUtils}
*/
this.utils = new WebGPUUtils( this );
/**
* A reference to a backend module holding shader attribute-related
* utility functions.
*
* @type {WebGPUAttributeUtils}
*/
this.attributeUtils = new WebGPUAttributeUtils( this );
/**
* A reference to a backend module holding shader binding-related
* utility functions.
*
* @type {WebGPUBindingUtils}
*/
this.bindingUtils = new WebGPUBindingUtils( this );
/**
* A reference to a backend module holding shader pipeline-related
* utility functions.
*
* @type {WebGPUPipelineUtils}
*/
this.pipelineUtils = new WebGPUPipelineUtils( this );
/**
* A reference to a backend module holding shader texture-related
* utility functions.
*
* @type {WebGPUTextureUtils}
*/
this.textureUtils = new WebGPUTextureUtils( this );
/**
* A map that manages the resolve buffers for occlusion queries.
*
* @type {Map<Number,GPUBuffer>}
*/
this.occludedResolveCache = new Map();
}
/**
* Initializes the backend so it is ready for usage.
*
* @async
* @param {Renderer} renderer - The renderer.
* @return {Promise} A Promise that resolves when the backend has been initialized.
*/
async init( renderer ) {
await super.init( renderer );
//
const parameters = this.parameters;
// create the device if it is not passed with parameters
let device;
if ( parameters.device === undefined ) {
const adapterOptions = {
powerPreference: parameters.powerPreference
};
const adapter = ( typeof navigator !== 'undefined' ) ? await navigator.gpu.requestAdapter( adapterOptions ) : null;
if ( adapter === null ) {
throw new Error( 'WebGPUBackend: Unable to create WebGPU adapter.' );
}
// feature support
const features = Object.values( GPUFeatureName );
const supportedFeatures = [];
for ( const name of features ) {
if ( adapter.features.has( name ) ) {
supportedFeatures.push( name );
}
}
const deviceDescriptor = {
requiredFeatures: supportedFeatures,
requiredLimits: parameters.requiredLimits
};
device = await adapter.requestDevice( deviceDescriptor );
} else {
device = parameters.device;
}
device.lost.then( ( info ) => {
const deviceLossInfo = {
api: 'WebGPU',
message: info.message || 'Unknown reason',
reason: info.reason || null,
originalEvent: info
};
renderer.onDeviceLost( deviceLossInfo );
} );
const context = ( parameters.context !== undefined ) ? parameters.context : renderer.domElement.getContext( 'webgpu' );
this.device = device;
this.context = context;
const alphaMode = parameters.alpha ? 'premultiplied' : 'opaque';
this.trackTimestamp = this.trackTimestamp && this.hasFeature( GPUFeatureName.TimestampQuery );
this.context.configure( {
device: this.device,
format: this.utils.getPreferredCanvasFormat(),
usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
alphaMode: alphaMode
} );
this.updateSize();
}
/**
* The coordinate system of the backend.
*
* Always `WebGPUCoordinateSystem` for this backend.
*
* @type {Number}
* @readonly
*/
get coordinateSystem() {
return WebGPUCoordinateSystem;
}
/**
* This method performs a readback operation by moving buffer data from
* a storage buffer attribute from the GPU to the CPU.
*
* @async
* @param {StorageBufferAttribute} attribute - The storage buffer attribute.
* @return {Promise<ArrayBuffer>} A promise that resolves with the buffer data when the data are ready.
*/
async getArrayBufferAsync( attribute ) {
return await this.attributeUtils.getArrayBufferAsync( attribute );
}
/**
* Returns the backend's rendering context.
*
* This is the `GPUCanvasContext` that was either passed in via the `context`
* parameter or acquired from the renderer's DOM element in `init()`.
*
* @return {GPUCanvasContext} The rendering context.
*/
getContext() {
return this.context;
}
/**
* Returns the default render pass descriptor.
*
* In WebGPU, the default framebuffer must be configured
* like custom framebuffers so the backend needs a render
* pass descriptor even when rendering directly to screen.
*
* The descriptor is built lazily and cached in `this.defaultRenderPassdescriptor`;
* only the swap-chain dependent texture views are refreshed on every call.
*
* @private
* @return {Object} The render pass descriptor.
*/
_getDefaultRenderPassDescriptor() {
let descriptor = this.defaultRenderPassdescriptor;
if ( descriptor === null ) {
const renderer = this.renderer;
descriptor = {
colorAttachments: [ {
view: null
} ],
};
// attach a depth/stencil buffer when the renderer was configured with one
if ( this.renderer.depth === true || this.renderer.stencil === true ) {
descriptor.depthStencilAttachment = {
view: this.textureUtils.getDepthBuffer( renderer.depth, renderer.stencil ).createView()
};
}
const colorAttachment = descriptor.colorAttachments[ 0 ];
if ( this.renderer.samples > 0 ) {
// MSAA: render into the internal multisampled color buffer; the canvas
// texture becomes the resolve target (assigned below on every call)
colorAttachment.view = this.colorBuffer.createView();
} else {
// no MSAA: render directly into the canvas texture, no resolve step
colorAttachment.resolveTarget = undefined;
}
this.defaultRenderPassdescriptor = descriptor;
}
// the canvas texture changes every frame, so the corresponding view must be
// (re)assigned on each call
const colorAttachment = descriptor.colorAttachments[ 0 ];
if ( this.renderer.samples > 0 ) {
colorAttachment.resolveTarget = this.context.getCurrentTexture().createView();
} else {
colorAttachment.view = this.context.getCurrentTexture().createView();
}
return descriptor;
}
/**
* Returns the render pass descriptor for the given render context.
*
* Descriptors are cached per render target (keyed by the render context's
* cache key) and rebuilt whenever relevant render target state changes.
*
* @private
* @param {RenderContext} renderContext - The render context.
* @param {Object} colorAttachmentsConfig - Configuration object for the color attachments.
* @return {Object} The render pass descriptor.
*/
_getRenderPassDescriptor( renderContext, colorAttachmentsConfig = {} ) {
const renderTarget = renderContext.renderTarget;
const renderTargetData = this.get( renderTarget );
let descriptors = renderTargetData.descriptors;
// invalidate the whole descriptor cache when the render target's relevant
// state changed since the cache was populated
// NOTE(review): activeMipmapLevel is compared against renderTarget but stored
// from renderContext below — presumably kept in sync by the renderer; confirm.
if ( descriptors === undefined ||
renderTargetData.width !== renderTarget.width ||
renderTargetData.height !== renderTarget.height ||
renderTargetData.dimensions !== renderTarget.dimensions ||
renderTargetData.activeMipmapLevel !== renderTarget.activeMipmapLevel ||
renderTargetData.activeCubeFace !== renderContext.activeCubeFace ||
renderTargetData.samples !== renderTarget.samples ||
renderTargetData.loadOp !== colorAttachmentsConfig.loadOp
) {
descriptors = {};
renderTargetData.descriptors = descriptors;
// dispose: drop the cached backend data once the render target is disposed
const onDispose = () => {
renderTarget.removeEventListener( 'dispose', onDispose );
this.delete( renderTarget );
};
renderTarget.addEventListener( 'dispose', onDispose );
}
const cacheKey = renderContext.getCacheKey();
let descriptor = descriptors[ cacheKey ];
if ( descriptor === undefined ) {
const textures = renderContext.textures;
const colorAttachments = [];
let sliceIndex;
// one color attachment per texture (MRT)
for ( let i = 0; i < textures.length; i ++ ) {
const textureData = this.get( textures[ i ] );
const viewDescriptor = {
label: `colorAttachment_${ i }`,
baseMipLevel: renderContext.activeMipmapLevel,
mipLevelCount: 1,
baseArrayLayer: renderContext.activeCubeFace,
arrayLayerCount: 1,
dimension: GPUTextureViewDimension.TwoD
};
if ( renderTarget.isRenderTarget3D ) {
// 3D target: select the depth slice instead of an array layer
sliceIndex = renderContext.activeCubeFace;
viewDescriptor.baseArrayLayer = 0;
viewDescriptor.dimension = GPUTextureViewDimension.ThreeD;
viewDescriptor.depthOrArrayLayers = textures[ i ].image.depth;
} else if ( renderTarget.isRenderTargetArray ) {
viewDescriptor.dimension = GPUTextureViewDimension.TwoDArray;
viewDescriptor.depthOrArrayLayers = textures[ i ].image.depth;
}
const textureView = textureData.texture.createView( viewDescriptor );
let view, resolveTarget;
if ( textureData.msaaTexture !== undefined ) {
// MSAA: render into the multisampled texture and resolve into the target texture
view = textureData.msaaTexture.createView();
resolveTarget = textureView;
} else {
view = textureView;
resolveTarget = undefined;
}
colorAttachments.push( {
view,
depthSlice: sliceIndex,
resolveTarget,
loadOp: GPULoadOp.Load,
storeOp: GPUStoreOp.Store,
...colorAttachmentsConfig
} );
}
descriptor = {
colorAttachments,
};
if ( renderContext.depth ) {
const depthTextureData = this.get( renderContext.depthTexture );
const depthStencilAttachment = {
view: depthTextureData.texture.createView()
};
descriptor.depthStencilAttachment = depthStencilAttachment;
}
// cache the descriptor and remember the state it was built for
descriptors[ cacheKey ] = descriptor;
renderTargetData.width = renderTarget.width;
renderTargetData.height = renderTarget.height;
renderTargetData.samples = renderTarget.samples;
renderTargetData.activeMipmapLevel = renderContext.activeMipmapLevel;
renderTargetData.activeCubeFace = renderContext.activeCubeFace;
renderTargetData.dimensions = renderTarget.dimensions;
renderTargetData.depthSlice = sliceIndex;
renderTargetData.loadOp = colorAttachments[ 0 ].loadOp;
}
return descriptor;
}
/**
* This method is executed at the beginning of a render call and prepares
* the WebGPU state for upcoming render calls: it sets up occlusion queries,
* configures the render pass descriptor's load/store and clear operations,
* and begins the render pass on a fresh command encoder.
*
* @param {RenderContext} renderContext - The render context.
*/
beginRender( renderContext ) {
const renderContextData = this.get( renderContext );
const device = this.device;
const occlusionQueryCount = renderContext.occlusionQueryCount;
let occlusionQuerySet;
if ( occlusionQueryCount > 0 ) {
// release the query set/buffer from the previous frame before swapping
if ( renderContextData.currentOcclusionQuerySet ) renderContextData.currentOcclusionQuerySet.destroy();
if ( renderContextData.currentOcclusionQueryBuffer ) renderContextData.currentOcclusionQueryBuffer.destroy();
// Get a reference to the array of objects with queries. The renderContextData property
// can be changed by another render pass before the buffer.mapAsync() completes.
renderContextData.currentOcclusionQuerySet = renderContextData.occlusionQuerySet;
renderContextData.currentOcclusionQueryBuffer = renderContextData.occlusionQueryBuffer;
renderContextData.currentOcclusionQueryObjects = renderContextData.occlusionQueryObjects;
// create a fresh query set for this pass
occlusionQuerySet = device.createQuerySet( { type: 'occlusion', count: occlusionQueryCount, label: `occlusionQuerySet_${ renderContext.id }` } );
renderContextData.occlusionQuerySet = occlusionQuerySet;
renderContextData.occlusionQueryIndex = 0;
renderContextData.occlusionQueryObjects = new Array( occlusionQueryCount );
renderContextData.lastOcclusionObject = null;
}
// pick the default (canvas) descriptor or a render-target specific one
let descriptor;
if ( renderContext.textures === null ) {
descriptor = this._getDefaultRenderPassDescriptor();
} else {
descriptor = this._getRenderPassDescriptor( renderContext, { loadOp: GPULoadOp.Load } );
}
this.initTimestampQuery( renderContext, descriptor );
descriptor.occlusionQuerySet = occlusionQuerySet;
const depthStencilAttachment = descriptor.depthStencilAttachment;
// configure the color attachments' clear/load behavior
if ( renderContext.textures !== null ) {
const colorAttachments = descriptor.colorAttachments;
for ( let i = 0; i < colorAttachments.length; i ++ ) {
const colorAttachment = colorAttachments[ i ];
if ( renderContext.clearColor ) {
// only the first attachment receives the configured clear color
colorAttachment.clearValue = i === 0 ? renderContext.clearColorValue : { r: 0, g: 0, b: 0, a: 1 };
colorAttachment.loadOp = GPULoadOp.Clear;
colorAttachment.storeOp = GPUStoreOp.Store;
} else {
colorAttachment.loadOp = GPULoadOp.Load;
colorAttachment.storeOp = GPUStoreOp.Store;
}
}
} else {
const colorAttachment = descriptor.colorAttachments[ 0 ];
if ( renderContext.clearColor ) {
colorAttachment.clearValue = renderContext.clearColorValue;
colorAttachment.loadOp = GPULoadOp.Clear;
colorAttachment.storeOp = GPUStoreOp.Store;
} else {
colorAttachment.loadOp = GPULoadOp.Load;
colorAttachment.storeOp = GPUStoreOp.Store;
}
}
// configure the depth attachment's clear/load behavior
if ( renderContext.depth ) {
if ( renderContext.clearDepth ) {
depthStencilAttachment.depthClearValue = renderContext.clearDepthValue;
depthStencilAttachment.depthLoadOp = GPULoadOp.Clear;
depthStencilAttachment.depthStoreOp = GPUStoreOp.Store;
} else {
depthStencilAttachment.depthLoadOp = GPULoadOp.Load;
depthStencilAttachment.depthStoreOp = GPUStoreOp.Store;
}
}
// configure the stencil attachment's clear/load behavior
if ( renderContext.stencil ) {
if ( renderContext.clearStencil ) {
depthStencilAttachment.stencilClearValue = renderContext.clearStencilValue;
depthStencilAttachment.stencilLoadOp = GPULoadOp.Clear;
depthStencilAttachment.stencilStoreOp = GPUStoreOp.Store;
} else {
depthStencilAttachment.stencilLoadOp = GPULoadOp.Load;
depthStencilAttachment.stencilStoreOp = GPUStoreOp.Store;
}
}
// begin the pass on a fresh command encoder
const encoder = device.createCommandEncoder( { label: 'renderContext_' + renderContext.id } );
const currentPass = encoder.beginRenderPass( descriptor );
// store pass state for subsequent draw calls and for finishRender()
renderContextData.descriptor = descriptor;
renderContextData.encoder = encoder;
renderContextData.currentPass = currentPass;
renderContextData.currentSets = { attributes: {}, bindingGroups: [], pipeline: null, index: null };
renderContextData.renderBundles = [];
// apply viewport/scissor if configured on the render context
if ( renderContext.viewport ) {
this.updateViewport( renderContext );
}
if ( renderContext.scissor ) {
const { x, y, width, height } = renderContext.scissorValue;
currentPass.setScissorRect( x, y, width, height );
}
}
/**
* This method is executed at the end of a render call and finalizes work
* after draw calls.
*
* It flushes queued render bundles, closes a still-open occlusion query,
* ends the render pass, resolves occlusion query results into a mappable
* buffer, submits the command encoder and finally triggers mipmap
* generation for render target textures that request it.
*
* @param {RenderContext} renderContext - The render context.
*/
finishRender( renderContext ) {
const renderContextData = this.get( renderContext );
const occlusionQueryCount = renderContext.occlusionQueryCount;
// Replay all render bundles recorded for this context inside the current pass.
if ( renderContextData.renderBundles.length > 0 ) {
renderContextData.currentPass.executeBundles( renderContextData.renderBundles );
}
// Close the last occlusion query if draw() opened one that was never ended.
if ( occlusionQueryCount > renderContextData.occlusionQueryIndex ) {
renderContextData.currentPass.endOcclusionQuery();
}
renderContextData.currentPass.end();
if ( occlusionQueryCount > 0 ) {
const bufferSize = occlusionQueryCount * 8; // 8 byte entries for query results
//
// Resolve buffers are cached by size so they can be reused across frames.
let queryResolveBuffer = this.occludedResolveCache.get( bufferSize );
if ( queryResolveBuffer === undefined ) {
queryResolveBuffer = this.device.createBuffer(
{
size: bufferSize,
usage: GPUBufferUsage.QUERY_RESOLVE | GPUBufferUsage.COPY_SRC
}
);
this.occludedResolveCache.set( bufferSize, queryResolveBuffer );
}
//
// The read-back buffer is created fresh each call; resolveOccludedAsync() destroys it after mapping.
const readBuffer = this.device.createBuffer(
{
size: bufferSize,
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
}
);
// two buffers required here - WebGPU doesn't allow usage of QUERY_RESOLVE & MAP_READ to be combined
renderContextData.encoder.resolveQuerySet( renderContextData.occlusionQuerySet, 0, occlusionQueryCount, queryResolveBuffer, 0 );
renderContextData.encoder.copyBufferToBuffer( queryResolveBuffer, 0, readBuffer, 0, bufferSize );
renderContextData.occlusionQueryBuffer = readBuffer;
//
// Kick off the async read-back; results land in renderContextData.occluded. Intentionally not awaited.
this.resolveOccludedAsync( renderContext );
}
this.device.queue.submit( [ renderContextData.encoder.finish() ] );
//
// Generate mipmaps for render target textures that request them.
if ( renderContext.textures !== null ) {
const textures = renderContext.textures;
for ( let i = 0; i < textures.length; i ++ ) {
const texture = textures[ i ];
if ( texture.generateMipmaps === true ) {
this.textureUtils.generateMipmaps( texture );
}
}
}
}
/**
* Returns `true` if the given 3D object is fully occluded by other
* 3D objects in the scene.
*
* @param {RenderContext} renderContext - The render context.
* @param {Object3D} object - The 3D object to test.
* @return {Boolean} Whether the 3D object is fully occluded or not.
*/
isOccluded( renderContext, object ) {
const renderContextData = this.get( renderContext );
return renderContextData.occluded && renderContextData.occluded.has( object );
}
/**
* This method processes the result of occlusion queries and writes it
* into render context data.
*
* @async
* @param {RenderContext} renderContext - The render context.
* @return {Promise} A Promise that resolves when the occlusion query results have been processed.
*/
async resolveOccludedAsync( renderContext ) {
const renderContextData = this.get( renderContext );
// handle occlusion query results
const { currentOcclusionQueryBuffer, currentOcclusionQueryObjects } = renderContextData;
if ( currentOcclusionQueryBuffer && currentOcclusionQueryObjects ) {
const occluded = new WeakSet();
// Clear the "current" references BEFORE awaiting so a new render pass
// started while mapAsync() is pending does not observe stale data.
renderContextData.currentOcclusionQueryObjects = null;
renderContextData.currentOcclusionQueryBuffer = null;
await currentOcclusionQueryBuffer.mapAsync( GPUMapMode.READ );
const buffer = currentOcclusionQueryBuffer.getMappedRange();
// Each query result is a 64-bit passed-sample count; zero means the
// corresponding object was fully occluded.
const results = new BigUint64Array( buffer );
for ( let i = 0; i < currentOcclusionQueryObjects.length; i ++ ) {
if ( results[ i ] === BigInt( 0 ) ) {
occluded.add( currentOcclusionQueryObjects[ i ] );
}
}
currentOcclusionQueryBuffer.destroy();
renderContextData.occluded = occluded;
}
}
/**
* Updates the viewport with the values from the given render context.
*
* @param {RenderContext} renderContext - The render context.
*/
updateViewport( renderContext ) {
const { currentPass } = this.get( renderContext );
const { x, y, width, height, minDepth, maxDepth } = renderContext.viewportValue;
currentPass.setViewport( x, y, width, height, minDepth, maxDepth );
}
/**
* Performs a clear operation.
*
* The clear is implemented as an empty render pass whose load/store
* operations perform the actual clearing of the selected buffers.
*
* @param {Boolean} color - Whether the color buffer should be cleared or not.
* @param {Boolean} depth - Whether the depth buffer should be cleared or not.
* @param {Boolean} stencil - Whether the stencil buffer should be cleared or not.
* @param {RenderContext?} [renderTargetContext=null] - The render context of the current set render target.
*/
clear( color, depth, stencil, renderTargetContext = null ) {
const device = this.device;
const renderer = this.renderer;
let colorAttachments = [];
let depthStencilAttachment;
let clearValue;
let supportsDepth;
let supportsStencil;
if ( color ) {
const clearColor = this.getClearColor();
if ( this.renderer.alpha === true ) {
// premultiply alpha
const a = clearColor.a;
clearValue = { r: clearColor.r * a, g: clearColor.g * a, b: clearColor.b * a, a: a };
} else {
clearValue = { r: clearColor.r, g: clearColor.g, b: clearColor.b, a: clearColor.a };
}
}
// Either clear the default framebuffer or the given render target.
if ( renderTargetContext === null ) {
supportsDepth = renderer.depth;
supportsStencil = renderer.stencil;
const descriptor = this._getDefaultRenderPassDescriptor();
if ( color ) {
colorAttachments = descriptor.colorAttachments;
const colorAttachment = colorAttachments[ 0 ];
colorAttachment.clearValue = clearValue;
colorAttachment.loadOp = GPULoadOp.Clear;
colorAttachment.storeOp = GPUStoreOp.Store;
}
if ( supportsDepth || supportsStencil ) {
depthStencilAttachment = descriptor.depthStencilAttachment;
}
} else {
supportsDepth = renderTargetContext.depth;
supportsStencil = renderTargetContext.stencil;
if ( color ) {
const descriptor = this._getRenderPassDescriptor( renderTargetContext, { loadOp: GPULoadOp.Clear, clearValue } );
colorAttachments = descriptor.colorAttachments;
}
if ( supportsDepth || supportsStencil ) {
const depthTextureData = this.get( renderTargetContext.depthTexture );
depthStencilAttachment = {
view: depthTextureData.texture.createView()
};
}
}
//
// Buffers that are not cleared must be loaded so their contents survive the pass.
if ( supportsDepth ) {
if ( depth ) {
depthStencilAttachment.depthLoadOp = GPULoadOp.Clear;
depthStencilAttachment.depthClearValue = renderer.getClearDepth();
depthStencilAttachment.depthStoreOp = GPUStoreOp.Store;
} else {
depthStencilAttachment.depthLoadOp = GPULoadOp.Load;
depthStencilAttachment.depthStoreOp = GPUStoreOp.Store;
}
}
//
if ( supportsStencil ) {
if ( stencil ) {
depthStencilAttachment.stencilLoadOp = GPULoadOp.Clear;
depthStencilAttachment.stencilClearValue = renderer.getClearStencil();
depthStencilAttachment.stencilStoreOp = GPUStoreOp.Store;
} else {
depthStencilAttachment.stencilLoadOp = GPULoadOp.Load;
depthStencilAttachment.stencilStoreOp = GPUStoreOp.Store;
}
}
//
// An empty pass with Clear load ops performs the clear on submission.
const encoder = device.createCommandEncoder( { label: 'clear' } );
const currentPass = encoder.beginRenderPass( {
colorAttachments,
depthStencilAttachment
} );
currentPass.end();
device.queue.submit( [ encoder.finish() ] );
}
// compute
/**
* This method is executed at the beginning of a compute call and
* prepares the state for upcoming compute tasks.
*
* @param {Node|Array<Node>} computeGroup - The compute node(s).
*/
beginCompute( computeGroup ) {
const groupGPU = this.get( computeGroup );
const descriptor = {
label: 'computeGroup_' + computeGroup.id
};
this.initTimestampQuery( computeGroup, descriptor );
groupGPU.cmdEncoderGPU = this.device.createCommandEncoder( { label: 'computeGroup_' + computeGroup.id } );
groupGPU.passEncoderGPU = groupGPU.cmdEncoderGPU.beginComputePass( descriptor );
}
/**
* Executes a compute command for the given compute node.
*
* @param {Node|Array<Node>} computeGroup - The group of compute nodes of a compute call. Can be a single compute node.
* @param {Node} computeNode - The compute node.
* @param {Array<BindGroup>} bindings - The bindings.
* @param {ComputePipeline} pipeline - The compute pipeline.
*/
compute( computeGroup, computeNode, bindings, pipeline ) {
const { passEncoderGPU } = this.get( computeGroup );
// pipeline
const pipelineGPU = this.get( pipeline ).pipeline;
passEncoderGPU.setPipeline( pipelineGPU );
// bind groups
for ( let i = 0, l = bindings.length; i < l; i ++ ) {
const bindGroup = bindings[ i ];
const bindingsData = this.get( bindGroup );
passEncoderGPU.setBindGroup( i, bindingsData.group );
}
const maxComputeWorkgroupsPerDimension = this.device.limits.maxComputeWorkgroupsPerDimension;
const computeNodeData = this.get( computeNode );
// Lazily create a persistent dispatch size object per compute node.
if ( computeNodeData.dispatchSize === undefined ) computeNodeData.dispatchSize = { x: 0, y: 1, z: 1 };
const { dispatchSize } = computeNodeData;
// Spill the dispatch over a second dimension when the count exceeds the
// per-dimension device limit. NOTE(review): x * y can then exceed
// dispatchCount, so the compute shader is presumably expected to guard
// against excess invocations - confirm with the compute node implementation.
if ( computeNode.dispatchCount > maxComputeWorkgroupsPerDimension ) {
dispatchSize.x = Math.min( computeNode.dispatchCount, maxComputeWorkgroupsPerDimension );
dispatchSize.y = Math.ceil( computeNode.dispatchCount / maxComputeWorkgroupsPerDimension );
} else {
dispatchSize.x = computeNode.dispatchCount;
}
passEncoderGPU.dispatchWorkgroups(
dispatchSize.x,
dispatchSize.y,
dispatchSize.z
);
}
/**
* This method is executed at the end of a compute call and
* finalizes work after compute tasks.
*
* @param {Node|Array<Node>} computeGroup - The compute node(s).
*/
finishCompute( computeGroup ) {
const groupData = this.get( computeGroup );
groupData.passEncoderGPU.end();
this.device.queue.submit( [ groupData.cmdEncoderGPU.finish() ] );
}
/**
* Can be used to synchronize CPU operations with GPU tasks. So when this method is called,
* the CPU waits for the GPU to complete its operation (e.g. a compute task).
*
* @async
* @return {Promise} A Promise that resolves when synchronization has been finished.
*/
async waitForGPU() {
await this.device.queue.onSubmittedWorkDone();
}
// render object
/**
* Executes a draw command for the given render object.
*
* Sets pipeline, bind groups, index and vertex buffers (skipping redundant
* state changes via the per-pass `currentSets` cache), manages occlusion
* queries and finally issues the draw call - once per sub camera when an
* array camera is active.
*
* @param {RenderObject} renderObject - The render object to draw.
* @param {Info} info - Holds a series of statistical information about the GPU memory and the rendering process.
*/
draw( renderObject, info ) {
const { object, context, pipeline } = renderObject;
const bindings = renderObject.getBindings();
const renderContextData = this.get( context );
const pipelineGPU = this.get( pipeline ).pipeline;
const currentSets = renderContextData.currentSets;
const passEncoderGPU = renderContextData.currentPass;
const drawParams = renderObject.getDrawParameters();
if ( drawParams === null ) return;
// pipeline
if ( currentSets.pipeline !== pipelineGPU ) {
passEncoderGPU.setPipeline( pipelineGPU );
currentSets.pipeline = pipelineGPU;
}
// bind groups
const currentBindingGroups = currentSets.bindingGroups;
for ( let i = 0, l = bindings.length; i < l; i ++ ) {
const bindGroup = bindings[ i ];
const bindingsData = this.get( bindGroup );
// Only rebind when the group id at this slot actually changed.
if ( currentBindingGroups[ bindGroup.index ] !== bindGroup.id ) {
passEncoderGPU.setBindGroup( bindGroup.index, bindingsData.group );
currentBindingGroups[ bindGroup.index ] = bindGroup.id;
}
}
// attributes
const index = renderObject.getIndex();
const hasIndex = ( index !== null );
// index
if ( hasIndex === true ) {
if ( currentSets.index !== index ) {
const buffer = this.get( index ).buffer;
const indexFormat = ( index.array instanceof Uint16Array ) ? GPUIndexFormat.Uint16 : GPUIndexFormat.Uint32;
passEncoderGPU.setIndexBuffer( buffer, indexFormat );
currentSets.index = index;
}
}
// vertex buffers
const vertexBuffers = renderObject.getVertexBuffers();
for ( let i = 0, l = vertexBuffers.length; i < l; i ++ ) {
const vertexBuffer = vertexBuffers[ i ];
if ( currentSets.attributes[ i ] !== vertexBuffer ) {
const buffer = this.get( vertexBuffer ).buffer;
passEncoderGPU.setVertexBuffer( i, buffer );
currentSets.attributes[ i ] = vertexBuffer;
}
}
// occlusion queries - handle multiple consecutive draw calls for an object
if ( renderContextData.occlusionQuerySet !== undefined ) {
const lastObject = renderContextData.lastOcclusionObject;
if ( lastObject !== object ) {
// End the previous object's query before potentially starting a new one.
if ( lastObject !== null && lastObject.occlusionTest === true ) {
passEncoderGPU.endOcclusionQuery();
renderContextData.occlusionQueryIndex ++;
}
if ( object.occlusionTest === true ) {
passEncoderGPU.beginOcclusionQuery( renderContextData.occlusionQueryIndex );
renderContextData.occlusionQueryObjects[ renderContextData.occlusionQueryIndex ] = object;
}
renderContextData.lastOcclusionObject = object;
}
}
// draw
const draw = () => {
if ( object.isBatchedMesh === true ) {
const starts = object._multiDrawStarts;
const counts = object._multiDrawCounts;
const drawCount = object._multiDrawCount;
const drawInstances = object._multiDrawInstances;
// One draw call per batched geometry range.
for ( let i = 0; i < drawCount; i ++ ) {
const count = drawInstances ? drawInstances[ i ] : 1;
const firstInstance = count > 1 ? 0 : i;
if ( hasIndex === true ) {
passEncoderGPU.drawIndexed( counts[ i ], count, starts[ i ] / index.array.BYTES_PER_ELEMENT, 0, firstInstance );
} else {
passEncoderGPU.draw( counts[ i ], count, starts[ i ], firstInstance );
}
}
} else if ( hasIndex === true ) {
const { vertexCount: indexCount, instanceCount, firstVertex: firstIndex } = drawParams;
const indirect = renderObject.getIndirect();
if ( indirect !== null ) {
const buffer = this.get( indirect ).buffer;
passEncoderGPU.drawIndexedIndirect( buffer, 0 );
} else {
passEncoderGPU.drawIndexed( indexCount, instanceCount, firstIndex, 0, 0 );
}
info.update( object, indexCount, instanceCount );
} else {
const { vertexCount, instanceCount, firstVertex } = drawParams;
const indirect = renderObject.getIndirect();
if ( indirect !== null ) {
const buffer = this.get( indirect ).buffer;
passEncoderGPU.drawIndirect( buffer, 0 );
} else {
passEncoderGPU.draw( vertexCount, instanceCount, firstVertex, 0 );
}
info.update( object, vertexCount, instanceCount );
}
};
if ( renderObject.camera.isArrayCamera && renderObject.camera.cameras.length > 0 ) {
const cameraData = this.get( renderObject.camera );
const cameras = renderObject.camera.cameras;
const cameraIndex = renderObject.getBindingGroup( 'cameraIndex' );
// Lazily build one bind group per sub camera holding its camera index.
if ( cameraData.indexesGPU === undefined || cameraData.indexesGPU.length !== cameras.length ) {
const bindingsData = this.get( cameraIndex );
const indexesGPU = [];
const data = new Uint32Array( [ 0, 0, 0, 0 ] );
for ( let i = 0, len = cameras.length; i < len; i ++ ) {
data[ 0 ] = i;
const bindGroupIndex = this.bindingUtils.createBindGroupIndex( data, bindingsData.layout );
indexesGPU.push( bindGroupIndex );
}
cameraData.indexesGPU = indexesGPU; // TODO: Create a global library for this
}
const pixelRatio = this.renderer.getPixelRatio();
// Render once per sub camera into its own viewport region.
for ( let i = 0, len = cameras.length; i < len; i ++ ) {
const subCamera = cameras[ i ];
if ( object.layers.test( subCamera.layers ) ) {
const vp = subCamera.viewport;
passEncoderGPU.setViewport(
Math.floor( vp.x * pixelRatio ),
Math.floor( vp.y * pixelRatio ),
Math.floor( vp.width * pixelRatio ),
Math.floor( vp.height * pixelRatio ),
context.viewportValue.minDepth,
context.viewportValue.maxDepth
);
passEncoderGPU.setBindGroup( cameraIndex.index, cameraData.indexesGPU[ i ] );
draw();
}
}
} else {
draw();
}
}
// cache key
/**
* Returns `true` if the render pipeline requires an update.
*
* @param {RenderObject} renderObject - The render object.
* @return {Boolean} Whether the render pipeline requires an update or not.
*/
needsRenderUpdate( renderObject ) {
const data = this.get( renderObject );
const { object, material } = renderObject;
const utils = this.utils;
const sampleCount = utils.getSampleCountRenderContext( renderObject.context );
const colorSpace = utils.getCurrentColorSpace( renderObject.context );
const colorFormat = utils.getCurrentColorFormat( renderObject.context );
const depthStencilFormat = utils.getCurrentDepthStencilFormat( renderObject.context );
const primitiveTopology = utils.getPrimitiveTopology( object, material );
let needsUpdate = false;
if ( data.material !== material || data.materialVersion !== material.version ||
data.transparent !== material.transparent || data.blending !== material.blending || data.premultipliedAlpha !== material.premultipliedAlpha ||
data.blendSrc !== material.blendSrc || data.blendDst !== material.blendDst || data.blendEquation !== material.blendEquation ||
data.blendSrcAlpha !== material.blendSrcAlpha || data.blendDstAlpha !== material.blendDstAlpha || data.blendEquationAlpha !== material.blendEquationAlpha ||
data.colorWrite !== material.colorWrite || data.depthWrite !== material.depthWrite || data.depthTest !== material.depthTest || data.depthFunc !== material.depthFunc ||
data.stencilWrite !== material.stencilWrite || data.stencilFunc !== material.stencilFunc ||
data.stencilFail !== material.stencilFail || data.stencilZFail !== material.stencilZFail || data.stencilZPass !== material.stencilZPass ||
data.stencilFuncMask !== material.stencilFuncMask || data.stencilWriteMask !== material.stencilWriteMask ||
data.side !== material.side || data.alphaToCoverage !== material.alphaToCoverage ||
data.sampleCount !== sampleCount || data.colorSpace !== colorSpace ||
data.colorFormat !== colorFormat || data.depthStencilFormat !== depthStencilFormat ||
data.primitiveTopology !== primitiveTopology ||
data.clippingContextCacheKey !== renderObject.clippingContextCacheKey
) {
data.material = material; data.materialVersion = material.version;
data.transparent = material.transparent; data.blending = material.blending; data.premultipliedAlpha = material.premultipliedAlpha;
data.blendSrc = material.blendSrc; data.blendDst = material.blendDst; data.blendEquation = material.blendEquation;
data.blendSrcAlpha = material.blendSrcAlpha; data.blendDstAlpha = material.blendDstAlpha; data.blendEquationAlpha = material.blendEquationAlpha;
data.colorWrite = material.colorWrite;
data.depthWrite = material.depthWrite; data.depthTest = material.depthTest; data.depthFunc = material.depthFunc;
data.stencilWrite = material.stencilWrite; data.stencilFunc = material.stencilFunc;
data.stencilFail = material.stencilFail; data.stencilZFail = material.stencilZFail; data.stencilZPass = material.stencilZPass;
data.stencilFuncMask = material.stencilFuncMask; data.stencilWriteMask = material.stencilWriteMask;
data.side = material.side; data.alphaToCoverage = material.alphaToCoverage;
data.sampleCount = sampleCount;
data.colorSpace = colorSpace;
data.colorFormat = colorFormat;
data.depthStencilFormat = depthStencilFormat;
data.primitiveTopology = primitiveTopology;
data.clippingContextCacheKey = renderObject.clippingContextCacheKey;
needsUpdate = true;
}
return needsUpdate;
}
/**
* Returns a cache key that is used to identify render pipelines.
*
* @param {RenderObject} renderObject - The render object.
* @return {String} The cache key.
*/
getRenderCacheKey( renderObject ) {
const { object, material } = renderObject;
const utils = this.utils;
const renderContext = renderObject.context;
return [
material.transparent, material.blending, material.premultipliedAlpha,
material.blendSrc, material.blendDst, material.blendEquation,
material.blendSrcAlpha, material.blendDstAlpha, material.blendEquationAlpha,
material.colorWrite,
material.depthWrite, material.depthTest, material.depthFunc,
material.stencilWrite, material.stencilFunc,
material.stencilFail, material.stencilZFail, material.stencilZPass,
material.stencilFuncMask, material.stencilWriteMask,
material.side,
utils.getSampleCountRenderContext( renderContext ),
utils.getCurrentColorSpace( renderContext ), utils.getCurrentColorFormat( renderContext ), utils.getCurrentDepthStencilFormat( renderContext ),
utils.getPrimitiveTopology( object, material ),
renderObject.getGeometryCacheKey(),
renderObject.clippingContextCacheKey
].join();
}
// textures
/**
* Creates a GPU sampler for the given texture.
*
* @param {Texture} texture - The texture to create the sampler for.
*/
createSampler( texture ) {
this.textureUtils.createSampler( texture );
}
/**
* Destroys the GPU sampler for the given texture.
*
* @param {Texture} texture - The texture to destroy the sampler for.
*/
destroySampler( texture ) {
this.textureUtils.destroySampler( texture );
}
/**
* Creates a default texture for the given texture that can be used
* as a placeholder until the actual texture is ready for usage.
*
* @param {Texture} texture - The texture to create a default texture for.
*/
createDefaultTexture( texture ) {
this.textureUtils.createDefaultTexture( texture );
}
/**
* Defines a texture on the GPU for the given texture object.
*
* @param {Texture} texture - The texture.
* @param {Object} [options={}] - Optional configuration parameter.
*/
createTexture( texture, options ) {
this.textureUtils.createTexture( texture, options );
}
/**
* Uploads the updated texture data to the GPU.
*
* @param {Texture} texture - The texture.
* @param {Object} [options={}] - Optional configuration parameter.
*/
updateTexture( texture, options ) {
this.textureUtils.updateTexture( texture, options );
}
/**
* Generates mipmaps for the given texture.
*
* @param {Texture} texture - The texture.
*/
generateMipmaps( texture ) {
this.textureUtils.generateMipmaps( texture );
}
/**
* Destroys the GPU data for the given texture object.
*
* @param {Texture} texture - The texture.
*/
destroyTexture( texture ) {
this.textureUtils.destroyTexture( texture );
}
/**
* Returns texture data as a typed array.
*
* @async
* @param {Texture} texture - The texture to copy.
* @param {Number} x - The x coordinate of the copy origin.
* @param {Number} y - The y coordinate of the copy origin.
* @param {Number} width - The width of the copy.
* @param {Number} height - The height of the copy.
* @param {Number} faceIndex - The face index.
* @return {Promise<TypedArray>} A Promise that resolves with a typed array when the copy operation has finished.
*/
async copyTextureToBuffer( texture, x, y, width, height, faceIndex ) {
return this.textureUtils.copyTextureToBuffer( texture, x, y, width, height, faceIndex );
}
/**
* Inits a time stamp query for the given render context.
*
* @param {RenderContext} renderContext - The render context.
* @param {Object} descriptor - The query descriptor.
*/
initTimestampQuery( renderContext, descriptor ) {
if ( ! this.trackTimestamp ) return;
const type = renderContext.isComputeNode ? 'compute' : 'render';
if ( ! this.timestampQueryPool[ type ] ) {
// TODO: Variable maxQueries?
this.timestampQueryPool[ type ] = new WebGPUTimestampQueryPool( this.device, type, 2048 );
}
const timestampQueryPool = this.timestampQueryPool[ type ];
const baseOffset = timestampQueryPool.allocateQueriesForContext( renderContext );
descriptor.timestampWrites = {
querySet: timestampQueryPool.querySet,
beginningOfPassWriteIndex: baseOffset,
endOfPassWriteIndex: baseOffset + 1,
};
}
// node builder
/**
* Returns a node builder for the given render object.
*
* @param {RenderObject} object - The render object.
* @param {Renderer} renderer - The renderer.
* @return {WGSLNodeBuilder} The node builder.
*/
createNodeBuilder( object, renderer ) {
return new WGSLNodeBuilder( object, renderer );
}
// program
/**
* Creates a shader program from the given programmable stage.
*
* @param {ProgrammableStage} program - The programmable stage.
*/
createProgram( program ) {
const programGPU = this.get( program );
programGPU.module = {
module: this.device.createShaderModule( { code: program.code, label: program.stage + ( program.name !== '' ? `_${ program.name }` : '' ) } ),
entryPoint: 'main'
};
}
/**
* Destroys the shader program of the given programmable stage.
*
* @param {ProgrammableStage} program - The programmable stage.
*/
destroyProgram( program ) {
this.delete( program );
}
// pipelines
/**
* Creates a render pipeline for the given render object.
*
* @param {RenderObject} renderObject - The render object.
* @param {Array<Promise>} promises - An array of compilation promises which are used in `compileAsync()`.
*/
createRenderPipeline( renderObject, promises ) {
this.pipelineUtils.createRenderPipeline( renderObject, promises );
}
/**
* Creates a compute pipeline for the given compute node.
*
* @param {ComputePipeline} computePipeline - The compute pipeline.
* @param {Array<BindGroup>} bindings - The bindings.
*/
createComputePipeline( computePipeline, bindings ) {
this.pipelineUtils.createComputePipeline( computePipeline, bindings );
}
/**
* Prepares the state for encoding render bundles.
*
* @param {RenderContext} renderContext - The render context.
*/
beginBundle( renderContext ) {
const renderContextData = this.get( renderContext );
renderContextData._currentPass = renderContextData.currentPass;
renderContextData._currentSets = renderContextData.currentSets;
renderContextData.currentSets = { attributes: {}, bindingGroups: [], pipeline: null, index: null };
renderContextData.currentPass = this.pipelineUtils.createBundleEncoder( renderContext );
}
/**
* After processing render bundles this method finalizes related work.
*
* @param {RenderContext} renderContext - The render context.
* @param {RenderBundle} bundle - The render bundle.
*/
finishBundle( renderContext, bundle ) {
const renderContextData = this.get( renderContext );
const bundleEncoder = renderContextData.currentPass;
const bundleGPU = bundleEncoder.finish();
this.get( bundle ).bundleGPU = bundleGPU;
// restore render pass state
renderContextData.currentSets = renderContextData._currentSets;
renderContextData.currentPass = renderContextData._currentPass;
}
/**
* Adds a render bundle to the render context data.
*
* @param {RenderContext} renderContext - The render context.
* @param {RenderBundle} bundle - The render bundle to add.
*/
addBundle( renderContext, bundle ) {
const renderContextData = this.get( renderContext );
renderContextData.renderBundles.push( this.get( bundle ).bundleGPU );
}
// bindings
/**
* Creates bindings from the given bind group definition.
*
* @param {BindGroup} bindGroup - The bind group.
* @param {Array<BindGroup>} bindings - Array of bind groups.
* @param {Number} cacheIndex - The cache index.
* @param {Number} version - The version.
*/
createBindings( bindGroup, bindings, cacheIndex, version ) {
this.bindingUtils.createBindings( bindGroup, bindings, cacheIndex, version );
}
/**
* Updates the given bind group definition.
*
* @param {BindGroup} bindGroup - The bind group.
* @param {Array<BindGroup>} bindings - Array of bind groups.
* @param {Number} cacheIndex - The cache index.
* @param {Number} version - The version.
*/
updateBindings( bindGroup, bindings, cacheIndex, version ) {
this.bindingUtils.createBindings( bindGroup, bindings, cacheIndex, version );
}
/**
* Updates a buffer binding.
*
* @param {Buffer} binding - The buffer binding to update.
*/
updateBinding( binding ) {
this.bindingUtils.updateBinding( binding );
}
// attributes
/**
* Creates the buffer of an indexed shader attribute.
*
* @param {BufferAttribute} attribute - The indexed buffer attribute.
*/
createIndexAttribute( attribute ) {
this.attributeUtils.createAttribute( attribute, GPUBufferUsage.INDEX | GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST );
}
/**
* Creates the GPU buffer of a shader attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
*/
createAttribute( attribute ) {
this.attributeUtils.createAttribute( attribute, GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST );
}
/**
* Creates the GPU buffer of a storage attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
*/
createStorageAttribute( attribute ) {
this.attributeUtils.createAttribute( attribute, GPUBufferUsage.STORAGE | GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST );
}
/**
* Creates the GPU buffer of an indirect storage attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute.
*/
createIndirectStorageAttribute( attribute ) {
this.attributeUtils.createAttribute( attribute, GPUBufferUsage.STORAGE | GPUBufferUsage.INDIRECT | GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST );
}
/**
* Updates the GPU buffer of a shader attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute to update.
*/
updateAttribute( attribute ) {
this.attributeUtils.updateAttribute( attribute );
}
/**
* Destroys the GPU buffer of a shader attribute.
*
* @param {BufferAttribute} attribute - The buffer attribute to destroy.
*/
destroyAttribute( attribute ) {
this.attributeUtils.destroyAttribute( attribute );
}
// canvas
/**
* Triggers an update of the default render pass descriptor.
*/
updateSize() {
this.colorBuffer = this.textureUtils.getColorBuffer();
this.defaultRenderPassdescriptor = null;
}
// utils public
/**
* Returns the maximum anisotropy texture filtering value.
*
* @return {Number} The maximum anisotropy texture filtering value.
*/
getMaxAnisotropy() {
return 16;
}
/**
* Checks if the given feature is supported by the backend.
*
* @param {String} name - The feature's name.
* @return {Boolean} Whether the feature is supported or not.
*/
hasFeature( name ) {
return this.device.features.has( name );
}
/**
* Copies data of the given source texture to the given destination texture.
*
* @param {Texture} srcTexture - The source texture.
* @param {Texture} dstTexture - The destination texture.
* @param {Vector4?} [srcRegion=null] - The region of the source texture to copy.
* @param {(Vector2|Vector3)?} [dstPosition=null] - The destination position of the copy.
* @param {Number} [level=0] - The mip level to copy.
*/
copyTextureToTexture( srcTexture, dstTexture, srcRegion = null, dstPosition = null, level = 0 ) {
let dstX = 0;
let dstY = 0;
let dstLayer = 0;
let srcX = 0;
let srcY = 0;
let srcLayer = 0;
let srcWidth = srcTexture.image.width;
let srcHeight = srcTexture.image.height;
if ( srcRegion !== null ) {
srcX = srcRegion.x;
srcY = srcRegion.y;
srcLayer = srcRegion.z || 0;
srcWidth = srcRegion.width;
srcHeight = srcRegion.height;
}
if ( dstPosition !== null ) {
dstX = dstPosition.x;
dstY = dstPosition.y;
dstLayer = dstPosition.z || 0;
}
const encoder = this.device.createCommandEncoder( { label: 'copyTextureToTexture_' + srcTexture.id + '_' + dstTexture.id } );
const sourceGPU = this.get( srcTexture ).texture;
const destinationGPU = this.get( dstTexture ).texture;
encoder.copyTextureToTexture(
{
texture: sourceGPU,
mipLevel: level,
origin: { x: srcX, y: srcY, z: srcLayer }
},
{
texture: destinationGPU,
mipLevel: level,
origin: { x: dstX, y: dstY, z: dstLayer }
},
[
srcWidth,
srcHeight,
1
]
);
this.device.queue.submit( [ encoder.finish() ] );
}
/**
 * Copies the current bound framebuffer to the given texture.
 *
 * @param {Texture} texture - The destination texture.
 * @param {RenderContext} renderContext - The render context.
 * @param {Vector4} rectangle - A four dimensional vector defining the origin (x, y) and dimension (z = width, w = height) of the copy.
 */
copyFramebufferToTexture( texture, renderContext, rectangle ) {
const renderContextData = this.get( renderContext );
// Select the GPU texture that currently acts as the framebuffer: an
// attachment of the active render target, or — when rendering to the
// default framebuffer — the canvas texture / internal depth buffer.
let sourceGPU = null;
if ( renderContext.renderTarget ) {
if ( texture.isDepthTexture ) {
sourceGPU = this.get( renderContext.depthTexture ).texture;
} else {
sourceGPU = this.get( renderContext.textures[ 0 ] ).texture;
}
} else {
if ( texture.isDepthTexture ) {
sourceGPU = this.textureUtils.getDepthBuffer( renderContext.depth, renderContext.stencil );
} else {
sourceGPU = this.context.getCurrentTexture();
}
}
const destinationGPU = this.get( texture ).texture;
// GPUCommandEncoder.copyTextureToTexture() cannot convert formats,
// so bail out early on mismatch instead of failing at the GPU level.
if ( sourceGPU.format !== destinationGPU.format ) {
console.error( 'WebGPUBackend: copyFramebufferToTexture: Source and destination formats do not match.', sourceGPU.format, destinationGPU.format );
return;
}
// Copies cannot be recorded while a render pass is open. If a pass is
// active, end it and reuse its command encoder; otherwise create a
// temporary encoder just for this copy.
let encoder;
if ( renderContextData.currentPass ) {
renderContextData.currentPass.end();
encoder = renderContextData.encoder;
} else {
encoder = this.device.createCommandEncoder( { label: 'copyFramebufferToTexture_' + texture.id } );
}
encoder.copyTextureToTexture(
{
texture: sourceGPU,
origin: [ rectangle.x, rectangle.y, 0 ],
},
{
texture: destinationGPU
},
[
rectangle.z,
rectangle.w
]
);
if ( texture.generateMipmaps ) this.textureUtils.generateMipmaps( texture );
if ( renderContextData.currentPass ) {
// Restart the interrupted render pass. The attachments must now load
// the previously rendered content instead of clearing it.
const { descriptor } = renderContextData;
for ( let i = 0; i < descriptor.colorAttachments.length; i ++ ) {
descriptor.colorAttachments[ i ].loadOp = GPULoadOp.Load;
}
if ( renderContext.depth ) descriptor.depthStencilAttachment.depthLoadOp = GPULoadOp.Load;
if ( renderContext.stencil ) descriptor.depthStencilAttachment.stencilLoadOp = GPULoadOp.Load;
renderContextData.currentPass = encoder.beginRenderPass( descriptor );
// The new pass starts with clean bindings, so reset the state cache.
renderContextData.currentSets = { attributes: {}, bindingGroups: [], pipeline: null, index: null };
// Viewport and scissor state are not retained across passes; re-apply them.
if ( renderContext.viewport ) {
this.updateViewport( renderContext );
}
if ( renderContext.scissor ) {
const { x, y, width, height } = renderContext.scissorValue;
renderContextData.currentPass.setScissorRect( x, y, width, height );
}
} else {
// Standalone copy: submit the temporary encoder immediately. When an
// active pass was reused, its encoder is presumably submitted when the
// render context finishes — TODO confirm against the pass-end path.
this.device.queue.submit( [ encoder.finish() ] );
}
}
}
class IESSpotLight extends SpotLight {

	constructor( color, intensity, distance, angle, penumbra, decay ) {

		super( color, intensity, distance, angle, penumbra, decay );

		/**
		 * The IES profile map used by the light. A value of `null`
		 * means no profile is applied — presumably the light then
		 * behaves like a regular spot light; verify against IESSpotLightNode.
		 *
		 * @type {Texture?}
		 * @default null
		 */
		this.iesMap = null;

	}

	copy( source, recursive ) {

		super.copy( source, recursive );

		// The map is shared by reference, not cloned.
		this.iesMap = source.iesMap;

		return this;

	}

}
/**
* This version of a node library represents the standard version
* used in {@link WebGPURenderer}. It maps lights, tone mapping
* techniques and materials to node-based implementations.
*
* @private
* @augments NodeLibrary
*/
class StandardNodeLibrary extends NodeLibrary {

	/**
	 * Constructs a new standard node library by registering the default
	 * mappings of materials, lights and tone mapping techniques to their
	 * node-based implementations.
	 */
	constructor() {

		super();

		// Node materials, registered under the name of the classic
		// material class they replace.

		const materials = [
			[ MeshPhongNodeMaterial, 'MeshPhongMaterial' ],
			[ MeshStandardNodeMaterial, 'MeshStandardMaterial' ],
			[ MeshPhysicalNodeMaterial, 'MeshPhysicalMaterial' ],
			[ MeshToonNodeMaterial, 'MeshToonMaterial' ],
			[ MeshBasicNodeMaterial, 'MeshBasicMaterial' ],
			[ MeshLambertNodeMaterial, 'MeshLambertMaterial' ],
			[ MeshNormalNodeMaterial, 'MeshNormalMaterial' ],
			[ MeshMatcapNodeMaterial, 'MeshMatcapMaterial' ],
			[ LineBasicNodeMaterial, 'LineBasicMaterial' ],
			[ LineDashedNodeMaterial, 'LineDashedMaterial' ],
			[ PointsNodeMaterial, 'PointsMaterial' ],
			[ SpriteNodeMaterial, 'SpriteMaterial' ],
			[ ShadowNodeMaterial, 'ShadowMaterial' ]
		];

		for ( const [ nodeMaterialClass, name ] of materials ) this.addMaterial( nodeMaterialClass, name );

		// Light nodes, keyed by the light class they implement.

		const lights = [
			[ PointLightNode, PointLight ],
			[ DirectionalLightNode, DirectionalLight ],
			[ RectAreaLightNode, RectAreaLight ],
			[ SpotLightNode, SpotLight ],
			[ AmbientLightNode, AmbientLight ],
			[ HemisphereLightNode, HemisphereLight ],
			[ LightProbeNode, LightProbe ],
			[ IESSpotLightNode, IESSpotLight ]
		];

		for ( const [ lightNodeClass, lightClass ] of lights ) this.addLight( lightNodeClass, lightClass );

		// Tone mapping functions, keyed by the corresponding constant.

		const toneMappings = [
			[ linearToneMapping, LinearToneMapping ],
			[ reinhardToneMapping, ReinhardToneMapping ],
			[ cineonToneMapping, CineonToneMapping ],
			[ acesFilmicToneMapping, ACESFilmicToneMapping ],
			[ agxToneMapping, AgXToneMapping ],
			[ neutralToneMapping, NeutralToneMapping ]
		];

		for ( const [ toneMappingFn, toneMappingConstant ] of toneMappings ) this.addToneMapping( toneMappingFn, toneMappingConstant );

	}

}
/*
const debugHandler = {
get: function ( target, name ) {
// Add |update
if ( /^(create|destroy)/.test( name ) ) console.log( 'WebGPUBackend.' + name );
return target[ name ];
}
};
*/
/**
* This renderer is the new alternative of `WebGLRenderer`. `WebGPURenderer` has the ability
* to target different backends. By default, the renderer tries to use a WebGPU backend if the
* browser supports WebGPU. If not, `WebGPURenderer` falls back to a WebGL 2 backend.
*
* @augments module:Renderer~Renderer
*/
class WebGPURenderer extends Renderer {

	/**
	 * Constructs a new WebGPU renderer.
	 *
	 * @param {Object} parameters - The configuration parameter.
	 * @param {Boolean} [parameters.logarithmicDepthBuffer=false] - Whether logarithmic depth buffer is enabled or not.
	 * @param {Boolean} [parameters.alpha=true] - Whether the default framebuffer (which represents the final contents of the canvas) should be transparent or opaque.
	 * @param {Boolean} [parameters.depth=true] - Whether the default framebuffer should have a depth buffer or not.
	 * @param {Boolean} [parameters.stencil=false] - Whether the default framebuffer should have a stencil buffer or not.
	 * @param {Boolean} [parameters.antialias=false] - Whether MSAA as the default anti-aliasing should be enabled or not.
	 * @param {Number} [parameters.samples=0] - When `antialias` is `true`, `4` samples are used by default. Set this parameter to any other integer value than 0 to overwrite the default.
	 * @param {Boolean} [parameters.forceWebGL=false] - If set to `true`, the renderer uses a WebGL 2 backend no matter if WebGPU is supported or not.
	 * @param {Number} [parameters.outputType=undefined] - Texture type for output to canvas. By default, device's preferred format is used; other formats may incur overhead.
	 * @param {Number} [parameters.colorBufferType=HalfFloatType] - Defines the type of color buffers. The default `HalfFloatType` is recommended for best
	 * quality. To save memory and bandwidth, `UnsignedByteType` might be used. This will reduce rendering quality though.
	 */
	constructor( parameters = {} ) {

		let BackendClass = WebGLBackend;

		if ( ! parameters.forceWebGL ) {

			BackendClass = WebGPUBackend;

			// Used when WebGPU turns out to be unavailable at runtime:
			// the renderer then transparently switches to WebGL 2.
			parameters.getFallback = () => {

				console.warn( 'THREE.WebGPURenderer: WebGPU is not available, running under WebGL2 backend.' );

				return new WebGLBackend( parameters );

			};

		}

		super( new BackendClass( parameters ), parameters );

		/**
		 * The generic default value is overwritten with the
		 * standard node library for type mapping.
		 *
		 * @type {StandardNodeLibrary}
		 */
		this.library = new StandardNodeLibrary();

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isWebGPURenderer = true;

	}

}
/**
* A specialized group which gives applications access to the
* Render Bundle API of WebGPU. The group with all its descendant nodes
* are considered as one render bundle and processed as such by
* the renderer.
*
* This module is only fully supported by `WebGPURenderer` with a WebGPU backend.
* With a WebGL backend, the group can technically be rendered but without
* any performance improvements.
*
* @augments Group
*/
class BundleGroup extends Group {

	/**
	 * Constructs a new bundle group.
	 */
	constructor() {

		super();

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isBundleGroup = true;

		/**
		 * This property is only relevant for detecting types
		 * during serialization/deserialization. It should always
		 * match the class name.
		 *
		 * @type {String}
		 * @readonly
		 * @default 'BundleGroup'
		 */
		this.type = 'BundleGroup';

		/**
		 * Whether the bundle is static or not. When set to `true`, the
		 * structure is assumed not to change, e.g. no new objects are
		 * added to the group.
		 *
		 * If a change is required, an update can still be forced by
		 * setting the `needsUpdate` flag to `true`.
		 *
		 * @type {Boolean}
		 * @default true
		 */
		this.static = true;

		/**
		 * The bundle group's version. It is incremented every time
		 * `needsUpdate` is set to `true`.
		 *
		 * @type {Number}
		 * @readonly
		 * @default 0
		 */
		this.version = 0;

	}

	/**
	 * Set this property to `true` when the bundle group has changed.
	 *
	 * @type {Boolean}
	 * @default false
	 * @param {Boolean} value
	 */
	set needsUpdate( value ) {

		if ( value !== true ) return;

		this.version ++;

	}

}
/**
* This module is responsible to manage the post processing setups in apps.
* You usually create a single instance of this class and use it to define
* the output of your post processing effect chain.
* ```js
* const postProcessing = new PostProcessing( renderer );
*
* const scenePass = pass( scene, camera );
*
* postProcessing.outputNode = scenePass;
* ```
*/
class PostProcessing {

	/**
	 * Constructs a new post processing management module.
	 *
	 * @param {Renderer} renderer - A reference to the renderer.
	 * @param {Node<vec4>} outputNode - An optional output node.
	 */
	constructor( renderer, outputNode = vec4( 0, 0, 1, 1 ) ) {

		/**
		 * A reference to the renderer.
		 *
		 * @type {Renderer}
		 */
		this.renderer = renderer;

		/**
		 * A node which defines the final output of the post
		 * processing. This is usually the last node in a chain
		 * of effect nodes.
		 *
		 * @type {Node<vec4>}
		 */
		this.outputNode = outputNode;

		/**
		 * Whether the default output tone mapping and color
		 * space transformation should be enabled or not.
		 *
		 * It is enabled by default but it must be disabled when
		 * effects must be executed after tone mapping and color
		 * space conversion. A typical example is FXAA which
		 * requires sRGB input.
		 *
		 * When set to `false`, the app must control the output
		 * transformation with `RenderOutputNode`.
		 *
		 * ```js
		 * const outputPass = renderOutput( scenePass );
		 * ```
		 *
		 * @type {Boolean}
		 */
		this.outputColorTransform = true;

		/**
		 * Must be set to `true` when the output node changes.
		 *
		 * @type {Boolean}
		 */
		this.needsUpdate = true;

		const material = new NodeMaterial();
		material.name = 'PostProcessing';

		/**
		 * The full screen quad that is used to render
		 * the effects.
		 *
		 * @private
		 * @type {QuadMesh}
		 */
		this._quadMesh = new QuadMesh( material );

	}

	/**
	 * When `PostProcessing` is used to apply post processing effects,
	 * the application must use this version of `render()` inside
	 * its animation loop (not the one from the renderer).
	 */
	render() {

		this._update();

		const renderer = this.renderer;

		// Temporarily disable the renderer's own output transform; the
		// fragment node set up in `_update()` is responsible for it.

		const previousToneMapping = renderer.toneMapping;
		const previousOutputColorSpace = renderer.outputColorSpace;

		renderer.toneMapping = NoToneMapping;
		renderer.outputColorSpace = LinearSRGBColorSpace;

		this._quadMesh.render( renderer );

		renderer.toneMapping = previousToneMapping;
		renderer.outputColorSpace = previousOutputColorSpace;

	}

	/**
	 * Frees internal resources.
	 */
	dispose() {

		this._quadMesh.material.dispose();

	}

	/**
	 * Updates the state of the module.
	 *
	 * @private
	 */
	_update() {

		if ( this.needsUpdate !== true ) return;

		const renderer = this.renderer;

		const toneMapping = renderer.toneMapping;
		const outputColorSpace = renderer.outputColorSpace;

		// With the default output transform enabled, tone mapping and color
		// space conversion are appended to the output node; otherwise they
		// are only exposed to the node chain via its context.

		this._quadMesh.material.fragmentNode = this.outputColorTransform === true
			? renderOutput( this.outputNode, toneMapping, outputColorSpace )
			: this.outputNode.context( { toneMapping, outputColorSpace } );

		this._quadMesh.material.needsUpdate = true;

		this.needsUpdate = false;

	}

	/**
	 * When `PostProcessing` is used to apply post processing effects,
	 * the application must use this version of `renderAsync()` inside
	 * its animation loop (not the one from the renderer).
	 *
	 * @async
	 * @return {Promise} A Promise that resolves when the render has been finished.
	 */
	async renderAsync() {

		this._update();

		const renderer = this.renderer;

		// Same save/restore dance as in `render()`.

		const previousToneMapping = renderer.toneMapping;
		const previousOutputColorSpace = renderer.outputColorSpace;

		renderer.toneMapping = NoToneMapping;
		renderer.outputColorSpace = LinearSRGBColorSpace;

		await this._quadMesh.renderAsync( renderer );

		renderer.toneMapping = previousToneMapping;
		renderer.outputColorSpace = previousOutputColorSpace;

	}

}
/**
* This special type of texture is intended for compute shaders.
* It can be used to compute the data of a texture with a compute shader.
*
* Note: This type of texture can only be used with `WebGPURenderer`
* and a WebGPU backend.
*
* @augments Texture
*/
class StorageTexture extends Texture {

	/**
	 * Constructs a new storage texture.
	 *
	 * @param {Number} [width=1] - The storage texture's width.
	 * @param {Number} [height=1] - The storage texture's height.
	 */
	constructor( width = 1, height = 1 ) {

		super();

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isStorageTexture = true;

		/**
		 * The image object which just represents the texture's dimension.
		 *
		 * @type {{width: Number, height: Number}}
		 */
		this.image = { width, height };

		/**
		 * Storage textures default to `THREE.LinearFilter` for both
		 * magnification (`magFilter`) and minification (`minFilter`).
		 *
		 * @type {Number}
		 */
		this.magFilter = LinearFilter;
		this.minFilter = LinearFilter;

	}

}
/**
* This special type of buffer attribute is intended for compute shaders.
* It can be used to encode draw parameters for indirect draw calls.
*
* Note: This type of buffer attribute can only be used with `WebGPURenderer`
* and a WebGPU backend.
*
* @augments StorageBufferAttribute
*/
class IndirectStorageBufferAttribute extends StorageBufferAttribute {

	/**
	 * Constructs a new indirect storage buffer attribute.
	 *
	 * @param {Number|Uint32Array} count - The item count. It is also valid to pass a `Uint32Array` as an argument.
	 * The subsequent parameter is then obsolete.
	 * @param {Number} itemSize - The item size.
	 */
	constructor( count, itemSize ) {

		// Indirect draw parameters are always encoded as unsigned 32-bit integers.
		super( count, itemSize, Uint32Array );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isIndirectStorageBufferAttribute = true;

	}

}
/**
* A loader for loading node objects in the three.js JSON Object/Scene format.
*
* @augments Loader
*/
class NodeLoader extends Loader {

	/**
	 * Constructs a new node loader.
	 *
	 * @param {LoadingManager?} manager - A reference to a loading manager.
	 */
	constructor( manager ) {

		super( manager );

		/**
		 * Represents a dictionary of textures.
		 *
		 * @type {Object<String,Texture>}
		 */
		this.textures = {};

		/**
		 * Represents a dictionary of node types.
		 *
		 * @type {Object<String,Node.constructor>}
		 */
		this.nodes = {};

	}

	/**
	 * Loads the node definitions from the given URL.
	 *
	 * @param {String} url - The path/URL of the file to be loaded.
	 * @param {Function} onLoad - Will be called when load completes.
	 * @param {Function} onProgress - Will be called while load progresses.
	 * @param {Function} onError - Will be called when errors are thrown during the loading process.
	 */
	load( url, onLoad, onProgress, onError ) {

		const loader = new FileLoader( this.manager );
		loader.setPath( this.path );
		loader.setRequestHeader( this.requestHeader );
		loader.setWithCredentials( this.withCredentials );

		loader.load( url, ( text ) => {

			try {

				onLoad( this.parse( JSON.parse( text ) ) );

			} catch ( error ) {

				// Report the error and notify the manager so the
				// failed item is tracked.

				if ( onError ) {

					onError( error );

				} else {

					console.error( error );

				}

				this.manager.itemError( url );

			}

		}, onProgress, onError );

	}

	/**
	 * Parse the node dependencies for the loaded node.
	 *
	 * @param {Object} json - The JSON definition
	 * @return {Object<String,Node>} A dictionary with node dependencies.
	 */
	parseNodes( json ) {

		const nodes = {};

		if ( json === undefined ) return nodes;

		// First pass: instantiate every node so cross references
		// can be resolved during deserialization.

		for ( const nodeJSON of json ) {

			const { uuid, type } = nodeJSON;

			const node = this.createNodeFromType( type );
			node.uuid = uuid;

			nodes[ uuid ] = node;

		}

		const meta = { nodes, textures: this.textures };

		// Second pass: deserialize each node with the shared meta object.

		for ( const nodeJSON of json ) {

			nodeJSON.meta = meta;

			nodes[ nodeJSON.uuid ].deserialize( nodeJSON );

			delete nodeJSON.meta;

		}

		return nodes;

	}

	/**
	 * Parses the node from the given JSON.
	 *
	 * @param {Object} json - The JSON definition
	 * @return {Node} The parsed node.
	 */
	parse( json ) {

		const node = this.createNodeFromType( json.type );
		node.uuid = json.uuid;

		const meta = { nodes: this.parseNodes( json.nodes ), textures: this.textures };

		json.meta = meta;
		node.deserialize( json );
		delete json.meta;

		return node;

	}

	/**
	 * Defines the dictionary of textures.
	 *
	 * @param {Object<String,Texture>} value - The texture library defined as `<uuid,texture>`.
	 * @return {NodeLoader} A reference to this loader.
	 */
	setTextures( value ) {

		this.textures = value;

		return this;

	}

	/**
	 * Defines the dictionary of node types.
	 *
	 * @param {Object<String,Node.constructor>} value - The node library defined as `<classname,class>`.
	 * @return {NodeLoader} A reference to this loader.
	 */
	setNodes( value ) {

		this.nodes = value;

		return this;

	}

	/**
	 * Creates a node object from the given type.
	 *
	 * @param {String} type - The node type.
	 * @return {Node} The created node instance.
	 */
	createNodeFromType( type ) {

		const NodeClass = this.nodes[ type ];

		if ( NodeClass === undefined ) {

			console.error( 'THREE.NodeLoader: Node type not found:', type );

			// Fall back to a constant node so parsing can proceed.
			return float();

		}

		return nodeObject( new NodeClass() );

	}

}
/**
* A special type of material loader for loading node materials.
*
* @augments MaterialLoader
*/
class NodeMaterialLoader extends MaterialLoader {

	/**
	 * Constructs a new node material loader.
	 *
	 * @param {LoadingManager?} manager - A reference to a loading manager.
	 */
	constructor( manager ) {

		super( manager );

		/**
		 * Represents a dictionary of node types.
		 *
		 * @type {Object<String,Node.constructor>}
		 */
		this.nodes = {};

		/**
		 * Represents a dictionary of node material types.
		 *
		 * @type {Object<String,NodeMaterial.constructor>}
		 */
		this.nodeMaterials = {};

	}

	/**
	 * Parses the node material from the given JSON.
	 *
	 * @param {Object} json - The JSON definition
	 * @return {NodeMaterial} The parsed material.
	 */
	parse( json ) {

		const material = super.parse( json );

		// Resolve serialized input node references (uuid -> node instance)
		// and assign them to the corresponding material properties.

		const inputNodes = json.inputNodes;

		for ( const property in inputNodes ) {

			material[ property ] = this.nodes[ inputNodes[ property ] ];

		}

		return material;

	}

	/**
	 * Defines the dictionary of node types.
	 *
	 * @param {Object<String,Node.constructor>} value - The node library defined as `<classname,class>`.
	 * @return {NodeMaterialLoader} A reference to this loader.
	 */
	setNodes( value ) {

		this.nodes = value;

		return this;

	}

	/**
	 * Defines the dictionary of node material types.
	 *
	 * @param {Object<String,NodeMaterial.constructor>} value - The node material library defined as `<classname,class>`.
	 * @return {NodeMaterialLoader} A reference to this loader.
	 */
	setNodeMaterials( value ) {

		this.nodeMaterials = value;

		return this;

	}

	/**
	 * Creates a node material from the given type.
	 *
	 * @param {String} type - The node material type.
	 * @return {NodeMaterial} The created node material instance.
	 */
	createMaterialFromType( type ) {

		const MaterialClass = this.nodeMaterials[ type ];

		if ( MaterialClass !== undefined ) return new MaterialClass();

		// Not a node material type — defer to the base class registry.
		return super.createMaterialFromType( type );

	}

}
/**
* A special type of object loader for loading 3D objects using
* node materials.
*
* @augments ObjectLoader
*/
class NodeObjectLoader extends ObjectLoader {

	/**
	 * Constructs a new node object loader.
	 *
	 * @param {LoadingManager?} manager - A reference to a loading manager.
	 */
	constructor( manager ) {

		super( manager );

		/**
		 * Represents a dictionary of node types.
		 *
		 * @type {Object<String,Node.constructor>}
		 */
		this.nodes = {};

		/**
		 * Represents a dictionary of node material types.
		 *
		 * @type {Object<String,NodeMaterial.constructor>}
		 */
		this.nodeMaterials = {};

		/**
		 * A reference to hold the `nodes` JSON property.
		 *
		 * @private
		 * @type {Object?}
		 */
		this._nodesJSON = null;

	}

	/**
	 * Defines the dictionary of node types.
	 *
	 * @param {Object<String,Node.constructor>} value - The node library defined as `<classname,class>`.
	 * @return {NodeObjectLoader} A reference to this loader.
	 */
	setNodes( value ) {

		this.nodes = value;

		return this;

	}

	/**
	 * Defines the dictionary of node material types.
	 *
	 * @param {Object<String,NodeMaterial.constructor>} value - The node material library defined as `<classname,class>`.
	 * @return {NodeObjectLoader} A reference to this loader.
	 */
	setNodeMaterials( value ) {

		this.nodeMaterials = value;

		return this;

	}

	/**
	 * Parses the node objects from the given JSON.
	 *
	 * @param {Object} json - The JSON definition
	 * @param {Function} onLoad - The onLoad callback function.
	 * @return {Object3D} The parsed 3D object.
	 */
	parse( json, onLoad ) {

		// Stash the raw node definitions so `parseMaterials()` can resolve
		// them while the base class drives the overall parsing.
		this._nodesJSON = json.nodes;

		const data = super.parse( json, onLoad );

		this._nodesJSON = null; // dispose

		return data;

	}

	/**
	 * Parses the node objects from the given JSON and textures.
	 *
	 * @param {Object} json - The JSON definition
	 * @param {Object<String,Texture>} textures - The texture library.
	 * @return {Object<String,Node>} The parsed nodes.
	 */
	parseNodes( json, textures ) {

		if ( json === undefined ) return {};

		const loader = new NodeLoader();
		loader.setNodes( this.nodes );
		loader.setTextures( textures );

		return loader.parseNodes( json );

	}

	/**
	 * Parses the node materials from the given JSON and textures.
	 *
	 * @param {Object} json - The JSON definition
	 * @param {Object<String,Texture>} textures - The texture library.
	 * @return {Object<String,NodeMaterial>} The parsed materials.
	 */
	parseMaterials( json, textures ) {

		const materials = {};

		if ( json === undefined ) return materials;

		const nodes = this.parseNodes( this._nodesJSON, textures );

		const loader = new NodeMaterialLoader();
		loader.setTextures( textures );
		loader.setNodes( nodes );
		loader.setNodeMaterials( this.nodeMaterials );

		for ( const data of json ) {

			materials[ data.uuid ] = loader.parse( data );

		}

		return materials;

	}

}
/**
* In earlier three.js versions, clipping was defined globally
* on the renderer or on material level. This special version of
* `THREE.Group` allows to encode the clipping state into the scene
* graph. Meaning if you create an instance of this group, all
* descendant 3D objects will be affected by the respective clipping
* planes.
*
* Note: `ClippingGroup` can only be used with `WebGPURenderer`.
*
* @augments Group
*/
class ClippingGroup extends Group {

	/**
	 * Constructs a new clipping group.
	 */
	constructor() {

		super();

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {Boolean}
		 * @readonly
		 * @default true
		 */
		this.isClippingGroup = true;

		/**
		 * An array with clipping planes.
		 *
		 * @type {Array<Plane>}
		 */
		this.clippingPlanes = [];

		/**
		 * Whether clipping should be enabled or not.
		 *
		 * @type {Boolean}
		 * @default true
		 */
		this.enabled = true;

		/**
		 * Whether the intersection of the clipping planes is used to
		 * clip objects, rather than their union.
		 *
		 * @type {Boolean}
		 * @default false
		 */
		this.clipIntersection = false;

		/**
		 * Whether shadows should be clipped or not.
		 *
		 * @type {Boolean}
		 * @default false
		 */
		this.clipShadows = false;

	}

}
export { ACESFilmicToneMapping, AONode, AddEquation, AddOperation, AdditiveBlending, AgXToneMapping, AlphaFormat, AlwaysCompare, AlwaysDepth, AlwaysStencilFunc, AmbientLight, AmbientLightNode, AnalyticLightNode, ArrayCamera, ArrayElementNode, ArrayNode, AssignNode, AttributeNode, BackSide, BasicEnvironmentNode, BasicShadowMap, BatchNode, BoxGeometry, BufferAttribute, BufferAttributeNode, BufferGeometry, BufferNode, BumpMapNode, BundleGroup, BypassNode, ByteType, CacheNode, Camera, CineonToneMapping, ClampToEdgeWrapping, ClippingGroup, CodeNode, Color, ColorManagement, ColorSpaceNode, ComputeNode, ConstNode, ContextNode, ConvertNode, CubeCamera, CubeReflectionMapping, CubeRefractionMapping, CubeTexture, CubeTextureNode, CubeUVReflectionMapping, CullFaceBack, CullFaceFront, CullFaceNone, CustomBlending, DataArrayTexture, DataTexture, DecrementStencilOp, DecrementWrapStencilOp, DepthFormat, DepthStencilFormat, DepthTexture, DirectionalLight, DirectionalLightNode, DoubleSide, DstAlphaFactor, DstColorFactor, DynamicDrawUsage, EnvironmentNode, EqualCompare, EqualDepth, EqualStencilFunc, EquirectUVNode, EquirectangularReflectionMapping, EquirectangularRefractionMapping, Euler, EventDispatcher, ExpressionNode, FileLoader, Float16BufferAttribute, Float32BufferAttribute, FloatType, FramebufferTexture, FrontFacingNode, FrontSide, Frustum, FunctionCallNode, FunctionNode, FunctionOverloadingNode, GLSLNodeParser, GreaterCompare, GreaterDepth, GreaterEqualCompare, GreaterEqualDepth, GreaterEqualStencilFunc, GreaterStencilFunc, Group, HalfFloatType, HemisphereLight, HemisphereLightNode, IESSpotLight, IESSpotLightNode, IncrementStencilOp, IncrementWrapStencilOp, IndexNode, IndirectStorageBufferAttribute, InstanceNode, InstancedBufferAttribute, InstancedInterleavedBuffer, InstancedMeshNode, IntType, InterleavedBuffer, InterleavedBufferAttribute, InvertStencilOp, IrradianceNode, JoinNode, KeepStencilOp, LessCompare, LessDepth, LessEqualCompare, LessEqualDepth, LessEqualStencilFunc, 
LessStencilFunc, LightProbe, LightProbeNode, Lighting, LightingContextNode, LightingModel, LightingNode, LightsNode, Line2NodeMaterial, LineBasicMaterial, LineBasicNodeMaterial, LineDashedMaterial, LineDashedNodeMaterial, LinearFilter, LinearMipMapLinearFilter, LinearMipmapLinearFilter, LinearMipmapNearestFilter, LinearSRGBColorSpace, LinearToneMapping, Loader, LoopNode, LuminanceAlphaFormat, LuminanceFormat, MRTNode, MatcapUVNode, Material, MaterialLoader, MaterialNode, MaterialReferenceNode, MathUtils, Matrix2, Matrix3, Matrix4, MaxEquation, MaxMipLevelNode, MemberNode, Mesh, MeshBasicMaterial, MeshBasicNodeMaterial, MeshLambertMaterial, MeshLambertNodeMaterial, MeshMatcapMaterial, MeshMatcapNodeMaterial, MeshNormalMaterial, MeshNormalNodeMaterial, MeshPhongMaterial, MeshPhongNodeMaterial, MeshPhysicalMaterial, MeshPhysicalNodeMaterial, MeshSSSNodeMaterial, MeshStandardMaterial, MeshStandardNodeMaterial, MeshToonMaterial, MeshToonNodeMaterial, MinEquation, MirroredRepeatWrapping, MixOperation, ModelNode, MorphNode, MultiplyBlending, MultiplyOperation, NearestFilter, NearestMipmapLinearFilter, NearestMipmapNearestFilter, NeutralToneMapping, NeverCompare, NeverDepth, NeverStencilFunc, NoBlending, NoColorSpace, NoToneMapping, Node, NodeAccess, NodeAttribute, NodeBuilder, NodeCache, NodeCode, NodeFrame, NodeFunctionInput, NodeLoader, NodeMaterial, NodeMaterialLoader, NodeMaterialObserver, NodeObjectLoader, NodeShaderStage, NodeType, NodeUniform, NodeUpdateType, NodeUtils, NodeVar, NodeVarying, NormalBlending, NormalMapNode, NotEqualCompare, NotEqualDepth, NotEqualStencilFunc, Object3D, Object3DNode, ObjectLoader, ObjectSpaceNormalMap, OneFactor, OneMinusDstAlphaFactor, OneMinusDstColorFactor, OneMinusSrcAlphaFactor, OneMinusSrcColorFactor, OrthographicCamera, OutputStructNode, PCFShadowMap, PMREMGenerator, PMREMNode, ParameterNode, PassNode, PerspectiveCamera, PhongLightingModel, PhysicalLightingModel, Plane, PointLight, PointLightNode, PointUVNode, PointsMaterial, 
PointsNodeMaterial, PostProcessing, PosterizeNode, PropertyNode, QuadMesh, RED_GREEN_RGTC2_Format, RED_RGTC1_Format, REVISION, RGBAFormat, RGBAIntegerFormat, RGBA_ASTC_10x10_Format, RGBA_ASTC_10x5_Format, RGBA_ASTC_10x6_Format, RGBA_ASTC_10x8_Format, RGBA_ASTC_12x10_Format, RGBA_ASTC_12x12_Format, RGBA_ASTC_4x4_Format, RGBA_ASTC_5x4_Format, RGBA_ASTC_5x5_Format, RGBA_ASTC_6x5_Format, RGBA_ASTC_6x6_Format, RGBA_ASTC_8x5_Format, RGBA_ASTC_8x6_Format, RGBA_ASTC_8x8_Format, RGBA_BPTC_Format, RGBA_ETC2_EAC_Format, RGBA_PVRTC_2BPPV1_Format, RGBA_PVRTC_4BPPV1_Format, RGBA_S3TC_DXT1_Format, RGBA_S3TC_DXT3_Format, RGBA_S3TC_DXT5_Format, RGBFormat, RGBIntegerFormat, RGB_ETC1_Format, RGB_ETC2_Format, RGB_PVRTC_2BPPV1_Format, RGB_PVRTC_4BPPV1_Format, RGB_S3TC_DXT1_Format, RGFormat, RGIntegerFormat, RTTNode, RangeNode, RectAreaLight, RectAreaLightNode, RedFormat, RedIntegerFormat, ReferenceNode, ReflectorNode, ReinhardToneMapping, RemapNode, RenderOutputNode, RenderTarget, RendererReferenceNode, RendererUtils, RepeatWrapping, ReplaceStencilOp, ReverseSubtractEquation, RotateNode, SIGNED_RED_GREEN_RGTC2_Format, SIGNED_RED_RGTC1_Format, SRGBColorSpace, SRGBTransfer, Scene, SceneNode, ScreenNode, ScriptableNode, ScriptableValueNode, SetNode, ShadowBaseNode, ShadowMaterial, ShadowNode, ShadowNodeMaterial, ShortType, SkinningNode, SphereGeometry, SplitNode, SpotLight, SpotLightNode, SpriteMaterial, SpriteNodeMaterial, SpriteSheetUVNode, SrcAlphaFactor, SrcAlphaSaturateFactor, SrcColorFactor, StackNode, StaticDrawUsage, StorageArrayElementNode, StorageBufferAttribute, StorageBufferNode, StorageInstancedBufferAttribute, StorageTexture, StorageTextureNode, StructNode, StructTypeNode, SubtractEquation, SubtractiveBlending, TSL, TangentSpaceNormalMap, TempNode, Texture, Texture3DNode, TextureNode, TextureSizeNode, ToneMappingNode, ToonOutlinePassNode, TriplanarTexturesNode, UVMapping, Uint16BufferAttribute, Uint32BufferAttribute, UniformArrayNode, UniformGroupNode, UniformNode, 
UnsignedByteType, UnsignedInt248Type, UnsignedInt5999Type, UnsignedIntType, UnsignedShort4444Type, UnsignedShort5551Type, UnsignedShortType, UserDataNode, VSMShadowMap, VarNode, VaryingNode, Vector2, Vector3, Vector4, VertexColorNode, ViewportDepthNode, ViewportDepthTextureNode, ViewportSharedTextureNode, ViewportTextureNode, VolumeNodeMaterial, WebGLCoordinateSystem, WebGLCubeRenderTarget, WebGPUCoordinateSystem, WebGPURenderer, WebXRController, ZeroFactor, ZeroStencilOp, createCanvasElement, defaultBuildStages, defaultShaderStages, shaderStages, vectorComponents };