Three.js Learning: Using an Image as a Material
The goal of this post is simple: apply a visual effect to an image, ideally implemented with a shader. PS: I'm not very familiar with shaders yet, but as in the previous post, the plan is to learn while building.
First: initialize a Three.js scene
This step is the same as in the previous post: simply initialize the scene, the camera, and the animate loop.
<script setup lang="ts">
import { AmbientLight, PCFSoftShadowMap, PerspectiveCamera, Scene, WebGLRenderer } from 'three'
import { onMounted, ref } from 'vue'
const canvasContainer = ref<HTMLCanvasElement | null>(null)
let canvas: HTMLCanvasElement
let renderer: WebGLRenderer
let scene: Scene
let camera: PerspectiveCamera
onMounted(() => {
initThree()
animate()
})
function initThree() {
// ===== 🖼️ Canvas, renderer & scene =====
{
canvas = canvasContainer.value!
renderer = new WebGLRenderer({
canvas,
antialias: true, // anti-aliasing
alpha: true, // transparent renderer background
precision: 'highp', // high-precision shaders
})
renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2))
renderer.shadowMap.enabled = true
renderer.shadowMap.type = PCFSoftShadowMap
scene = new Scene()
}
// ===== 🎥 Camera =====
{
camera = new PerspectiveCamera(
50,
canvas.clientWidth / canvas.clientHeight,
0.1,
100,
)
camera.position.set(2, 2, 5)
}
// ===== 💡 Lights =====
{
const ambientlight = new AmbientLight(0xffffff, 2)
scene.add(ambientlight)
}
}
function animate() {
requestAnimationFrame(animate)
// Renderer
renderer.clear()
renderer.clearDepth()
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement
camera.aspect = canvas.clientWidth / canvas.clientHeight
camera.updateProjectionMatrix()
}
renderer.render(scene, camera)
}
function resizeRendererToDisplaySize(renderer: WebGLRenderer) {
const canvas = renderer.domElement
const width = canvas.clientWidth
const height = canvas.clientHeight
const needResize = canvas.width !== width || canvas.height !== height
if (needResize) {
renderer.setSize(width, height, false)
}
return needResize
}
</script>
<template>
<div class="relative flex items-center justify-center">
<img
src="https://images.unsplash.com/photo-1517841905240-472988babdf9?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=934&q=80"
data-hover="https://images.unsplash.com/photo-1522609925277-66fea332c575?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=934&q=80"
alt="My image"
width="400"
>
<canvas
ref="canvasContainer"
class="absolute inset-0 h-full w-full"
/>
</div>
</template>
Next: a simple first scene
The idea: the DOM loads an image; once Three.js can read it, hide the original DOM image and show it on a mesh instead, at which point a shader can apply effects to it. In other words, the Three.js-rendered image replaces the original one.
There is nothing special about this; it is probably the simplest possible approach. The canvas is styled with fixed inset-0 h-full w-full, so it covers the entire page.
Two things need to be considered:
- How to position the image rendered in the canvas so that it lines up with the original DOM image.
- When Three.js renders the image, the texture's colorSpace must be 'srgb', otherwise the colors look slightly off.
For the first point, since the canvas is fixed and full-screen, it is enough to read the image's absolute position and apply the corresponding offset to the mesh's position.
Because the canvas is fixed, though, scrolling is not handled here; the assumption is that the image sits fully inside the viewport. Otherwise, if only half of the image were visible, the mesh positioned from those absolute coordinates would also show only half.
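Concretely, the positioning relies on two things: the camera is placed at a distance where one world unit corresponds to one CSS pixel, and the top-left-origin coordinates from getBoundingClientRect are converted into Three.js's centered coordinate system. Below is a minimal standalone sketch of that math, using the same formulas as createMeshToScene in the full code; domRectToWorld is just an illustrative helper, not part of the component.

import { PerspectiveCamera, Vector2 } from 'three'

// Place the camera so that 1 world unit == 1 CSS pixel:
// at this distance the vertical frustum spans exactly window.innerHeight.
const fov = 45
const distance = window.innerHeight / 2 / Math.tan(Math.PI * fov / 360)
const camera = new PerspectiveCamera(fov, window.innerWidth / window.innerHeight, 1, 2000)
camera.position.set(0, 0, distance)

// Convert a DOMRect (origin at the viewport's top-left, y pointing down)
// into a mesh position (origin at the viewport's centre, y pointing up).
function domRectToWorld(rect: DOMRect): Vector2 {
  return new Vector2(
    rect.left - window.innerWidth / 2 + rect.width / 2,
    -rect.top + window.innerHeight / 2 - rect.height / 2,
  )
}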
The complete code:
<script setup lang="ts">
import gsap from 'gsap'
import { AmbientLight, Mesh, MeshBasicMaterial, PCFSoftShadowMap, PerspectiveCamera, PlaneGeometry, Scene, TextureLoader, Vector2, WebGLRenderer } from 'three'
import { nextTick, onMounted, ref } from 'vue'
const canvasContainer = ref<HTMLCanvasElement | null>(null)
let canvas: HTMLCanvasElement
let renderer: WebGLRenderer
let scene: Scene
let camera: PerspectiveCamera
let mesh: Mesh
const mouse = new Vector2(0, 0)
onMounted(() => {
initThree()
animate()
nextTick(() => {
createMeshToScene()
})
window.addEventListener('mousemove', (ev) => {
onMouseMove(ev)
})
})
function initThree() {
// ===== 🖼️ Canvas, renderer & scene =====
{
canvas = canvasContainer.value!
renderer = new WebGLRenderer({
canvas,
antialias: true, // anti-aliasing
alpha: true, // transparent renderer background
precision: 'highp', // high-precision shaders
})
renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2))
renderer.shadowMap.enabled = true
renderer.shadowMap.type = PCFSoftShadowMap
scene = new Scene()
}
// ===== 🎥 Camera =====
{
// Compute the FOV and camera distance so that 1 world unit maps to 1 CSS pixel
const fov = 45
const distance = window.innerHeight / 2 / Math.tan(Math.PI * fov / 360)
camera = new PerspectiveCamera(
fov,
window.innerWidth / window.innerHeight,
1,
2000,
)
// Move the camera back to the computed distance
camera.position.set(0, 0, distance)
}
// ===== 💡 Lights =====
{
const ambientlight = new AmbientLight(0xffffff, 2)
scene.add(ambientlight)
}
}
function animate() {
requestAnimationFrame(animate)
// Renderer
renderer.clear()
renderer.clearDepth()
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement
camera.aspect = canvas.clientWidth / canvas.clientHeight
camera.updateProjectionMatrix()
}
renderer.render(scene, camera)
}
function resizeRendererToDisplaySize(renderer: WebGLRenderer) {
const canvas = renderer.domElement
const width = canvas.clientWidth
const height = canvas.clientHeight
const needResize = canvas.width !== width || canvas.height !== height
if (needResize) {
renderer.setSize(width, height, false)
}
return needResize
}
const imageRef = ref<HTMLImageElement | null>(null)
function createMeshToScene() {
if (!imageRef.value || !canvasContainer.value)
return
// 1. Load the image
const loader = new TextureLoader()
loader.load(
imageRef.value.src,
(texture) => {
texture.colorSpace = 'srgb'
// 2. Read the DOM image's rendered size and position
const { width, height, top, left } = imageRef.value!.getBoundingClientRect()
// 3. Create the geometry
const geometry = new PlaneGeometry(width, height, 1, 1)
// 4. Create the material
const material = new MeshBasicMaterial({
map: texture,
})
// 5. Create the mesh
mesh = new Mesh(geometry, material)
// 6. Compute the offset that aligns the mesh with the original image
const offset = new Vector2(
left - window.innerWidth / 2 + width / 2,
-top + window.innerHeight / 2 - height / 2,
)
mesh.position.set(offset.x, offset.y, 0)
// 7. Add the mesh to the scene
scene.add(mesh)
},
)
imageRef.value.style.opacity = '0'
}
function onMouseMove(event: MouseEvent) {
if (!mesh)
return
gsap.to(mouse, {
duration: 0.5,
x: (event.clientX / window.innerWidth) * 2 - 1,
y: -(event.clientY / window.innerHeight) * 2 + 1,
})
gsap.to(mesh.rotation, {
duration: 0.5,
x: -mouse.y * 0.3,
y: mouse.x * (Math.PI / 6),
})
}
</script>
<template>
<div class="flex items-center justify-center">
<img
ref="imageRef"
src="https://images.unsplash.com/photo-1517841905240-472988babdf9?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=934&q=80"
data-hover="https://images.unsplash.com/photo-1522609925277-66fea332c575?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=934&q=80"
alt="My image"
width="400"
>
<canvas
ref="canvasContainer"
class="fixed inset-0 h-full w-full"
/>
</div>
</template>
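One small side note on the mousemove handler: it is attached to window inside onMounted but never removed, which can leak in a single-page app. A minimal cleanup sketch, not part of the original component (onMouseMove is the handler defined above):

import { onMounted, onUnmounted } from 'vue'

// Keep a stable reference so the same function can be removed later.
function handleMouseMove(ev: MouseEvent) {
  onMouseMove(ev)
}

onMounted(() => {
  window.addEventListener('mousemove', handleMouseMove)
})

onUnmounted(() => {
  window.removeEventListener('mousemove', handleMouseMove)
})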
Then: going a step further
Hover the mouse over the image to see the effect.
In the previous step the image was loaded through a MeshBasicMaterial assigned to the mesh's material. Now we go further and load the images through a ShaderMaterial, adding a hover effect: two images are blended, and the shader reveals the second one through a distorted, circular transparent mask.
Core code:
const uniforms = {
u_image: { type: 't', value: image },
u_imageHover: { type: 't', value: hoverImage },
u_mouse: { value: mouse },
u_time: { value: 0 },
u_res: {
value: new Vector2(window.innerWidth, window.innerHeight),
},
}
// Create the shader material
const material = new ShaderMaterial({
uniforms,
vertexShader,
fragmentShader,
defines: {
PR: window.devicePixelRatio.toFixed(1),
},
})
// Create the mesh
mesh = new Mesh(geometry, material)
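One thing to note: the fragment shader distorts the mask with u_time, so that uniform needs to be advanced every frame or the noise stays frozen (the full component below leaves it at 0). A minimal sketch of how it could be updated in the render loop, assuming the ShaderMaterial is kept in a variable the loop can reach:

import { Clock, ShaderMaterial } from 'three'

const clock = new Clock()
let shaderMaterial: ShaderMaterial | undefined // assigned when the mesh is created

function animateUniforms() {
  requestAnimationFrame(animateUniforms)
  // Advance the shader time so the noise in the mask keeps moving.
  if (shaderMaterial)
    shaderMaterial.uniforms.u_time.value = clock.getElapsedTime()
  // ...then render the scene as in animate()
}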
The complete code:
<script setup lang="ts">
import gsap from 'gsap'
import { AmbientLight, Mesh, MeshBasicMaterial, PCFSoftShadowMap, PerspectiveCamera, PlaneGeometry, Scene, ShaderMaterial, TextureLoader, Vector2, WebGLRenderer } from 'three'
import { nextTick, onMounted, ref } from 'vue'
const canvasContainer = ref<HTMLCanvasElement | null>(null)
let canvas: HTMLCanvasElement
let renderer: WebGLRenderer
let scene: Scene
let camera: PerspectiveCamera
let mesh: Mesh
const mouse = new Vector2(-1, -1)
onMounted(() => {
initThree()
animate()
nextTick(() => {
createMeshToScene()
})
window.addEventListener('mousemove', (ev) => {
onMouseMove(ev)
})
})
function initThree() {
// ===== 🖼️ Canvas, renderer & scene =====
{
canvas = canvasContainer.value!
renderer = new WebGLRenderer({
canvas,
antialias: true, // anti-aliasing
alpha: true, // transparent renderer background
precision: 'highp', // high-precision shaders
})
renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2))
renderer.shadowMap.enabled = true
renderer.shadowMap.type = PCFSoftShadowMap
scene = new Scene()
}
// ===== 🎥 Camera =====
{
// Compute the FOV and camera distance so that 1 world unit maps to 1 CSS pixel
const fov = 45
const distance = window.innerHeight / 2 / Math.tan(Math.PI * fov / 360)
camera = new PerspectiveCamera(
fov,
window.innerWidth / window.innerHeight,
1,
2000,
)
// Move the camera back to the computed distance
camera.position.set(0, 0, distance)
}
// ===== 💡 Lights =====
{
const ambientlight = new AmbientLight(0xffffff, 2)
scene.add(ambientlight)
}
}
function animate() {
requestAnimationFrame(animate)
// Renderer
renderer.clear()
renderer.clearDepth()
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement
camera.aspect = canvas.clientWidth / canvas.clientHeight
camera.updateProjectionMatrix()
}
renderer.render(scene, camera)
}
function resizeRendererToDisplaySize(renderer: WebGLRenderer) {
const canvas = renderer.domElement
const width = canvas.clientWidth
const height = canvas.clientHeight
const needResize = canvas.width !== width || canvas.height !== height
if (needResize) {
renderer.setSize(width, height, false)
}
return needResize
}
const vertexShader = `
varying vec2 v_uv;
void main() {
v_uv = uv;
gl_Position = projectionMatrix * modelViewMatrix *
vec4(position, 1.0);
}
`
const fragmentShader = `
uniform vec2 u_mouse;
uniform vec2 u_res;
uniform sampler2D u_image;
uniform sampler2D u_imageHover;
uniform float u_time;
varying vec2 v_uv;
float circle(in vec2 _st, in float _radius, in float blurriness){
vec2 dist = _st;
return 1.-smoothstep(_radius-(_radius*blurriness), _radius+(_radius*blurriness), dot(dist,dist)*4.0);
}
vec3 mod289(vec3 x) {
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 mod289(vec4 x) {
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 permute(vec4 x) {
return mod289(((x*34.0)+1.0)*x);
}
vec4 taylorInvSqrt(vec4 r)
{
return 1.79284291400159 - 0.85373472095314 * r;
}
float snoise3(vec3 v)
{
const vec2 C = vec2(1.0/6.0, 1.0/3.0) ;
const vec4 D = vec4(0.0, 0.5, 1.0, 2.0);
// First corner
vec3 i = floor(v + dot(v, C.yyy) );
vec3 x0 = v - i + dot(i, C.xxx) ;
// Other corners
vec3 g = step(x0.yzx, x0.xyz);
vec3 l = 1.0 - g;
vec3 i1 = min( g.xyz, l.zxy );
vec3 i2 = max( g.xyz, l.zxy );
// x0 = x0 - 0.0 + 0.0 * C.xxx;
// x1 = x0 - i1 + 1.0 * C.xxx;
// x2 = x0 - i2 + 2.0 * C.xxx;
// x3 = x0 - 1.0 + 3.0 * C.xxx;
vec3 x1 = x0 - i1 + C.xxx;
vec3 x2 = x0 - i2 + C.yyy; // 2.0*C.x = 1/3 = C.y
vec3 x3 = x0 - D.yyy; // -1.0+3.0*C.x = -0.5 = -D.y
// Permutations
i = mod289(i);
vec4 p = permute( permute( permute(
i.z + vec4(0.0, i1.z, i2.z, 1.0 ))
+ i.y + vec4(0.0, i1.y, i2.y, 1.0 ))
+ i.x + vec4(0.0, i1.x, i2.x, 1.0 ));
// Gradients: 7x7 points over a square, mapped onto an octahedron.
// The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294)
float n_ = 0.142857142857; // 1.0/7.0
vec3 ns = n_ * D.wyz - D.xzx;
vec4 j = p - 49.0 * floor(p * ns.z * ns.z); // mod(p,7*7)
vec4 x_ = floor(j * ns.z);
vec4 y_ = floor(j - 7.0 * x_ ); // mod(j,N)
vec4 x = x_ *ns.x + ns.yyyy;
vec4 y = y_ *ns.x + ns.yyyy;
vec4 h = 1.0 - abs(x) - abs(y);
vec4 b0 = vec4( x.xy, y.xy );
vec4 b1 = vec4( x.zw, y.zw );
//vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - 1.0;
//vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - 1.0;
vec4 s0 = floor(b0)*2.0 + 1.0;
vec4 s1 = floor(b1)*2.0 + 1.0;
vec4 sh = -step(h, vec4(0.0));
vec4 a0 = b0.xzyw + s0.xzyw*sh.xxyy ;
vec4 a1 = b1.xzyw + s1.xzyw*sh.zzww ;
vec3 p0 = vec3(a0.xy,h.x);
vec3 p1 = vec3(a0.zw,h.y);
vec3 p2 = vec3(a1.xy,h.z);
vec3 p3 = vec3(a1.zw,h.w);
//Normalise gradients
vec4 norm = taylorInvSqrt(vec4(dot(p0,p0), dot(p1,p1), dot(p2, p2), dot(p3,p3)));
p0 *= norm.x;
p1 *= norm.y;
p2 *= norm.z;
p3 *= norm.w;
// Mix final noise value
vec4 m = max(0.6 - vec4(dot(x0,x0), dot(x1,x1), dot(x2,x2), dot(x3,x3)), 0.0);
m = m * m;
return 42.0 * dot( m*m, vec4( dot(p0,x0), dot(p1,x1),
dot(p2,x2), dot(p3,x3) ) );
}
void main() {
// We manage the device ratio by passing PR constant
vec2 res = u_res * PR;
vec2 st = gl_FragCoord.xy / res.xy - vec2(0.5);
// tip: use the following formula to keep the good ratio of your coordinates
st.y *= u_res.y / u_res.x;
// We readjust the mouse coordinates
vec2 mouse = u_mouse * -0.5;
vec2 circlePos = st + mouse;
float c = circle(circlePos, 0.05, 2.) * 2.5;
float offx = v_uv.x + sin(v_uv.y + u_time * .1);
float offy = v_uv.y - u_time * 0.1 - cos(u_time * .001) * .01;
float n = snoise3(vec3(offx, offy, u_time * .1) * 8.) - 1.;
float finalMask = smoothstep(0.4, 0.5, n + pow(c, 2.));
vec4 image = texture2D(u_image, v_uv);
vec4 hover = texture2D(u_imageHover, v_uv);
vec4 finalImage = mix(image, hover, finalMask);
gl_FragColor = finalImage;
}
`
const imageRef = ref<HTMLImageElement | null>(null)
const hoverImageRef = ref<HTMLImageElement | null>(null)
async function createMeshToScene() {
if (!imageRef.value || !canvasContainer.value)
return
// 1. Load the textures (loadAsync returns a Promise<Texture>)
const loader = new TextureLoader()
const hoverImage = await loader.loadAsync(hoverImageRef.value!.src)
const image = await loader.loadAsync(imageRef.value.src)
// 2. Set the texture color space
hoverImage.colorSpace = 'srgb'
image.colorSpace = 'srgb'
// 3. Read the DOM image's rendered size and position
const { width, height, top, left } = imageRef.value!.getBoundingClientRect()
// 4. Create the geometry
const geometry = new PlaneGeometry(width, height, 1, 1)
const uniforms = {
u_image: { type: 't', value: image },
u_imageHover: { type: 't', value: hoverImage },
u_mouse: { value: mouse },
u_time: { value: 0 },
u_res: {
value: new Vector2(window.innerWidth, window.innerHeight),
},
}
// 5. Create the shader material
const material = new ShaderMaterial({
uniforms,
vertexShader,
fragmentShader,
defines: {
PR: window.devicePixelRatio.toFixed(1),
},
})
// Alternative: a plain MeshBasicMaterial without the shader effect
// const material = new MeshBasicMaterial({
// map: image,
// })
// 6. Create the mesh
mesh = new Mesh(geometry, material)
// 7. Compute the offset that aligns the mesh with the original image
const offset = new Vector2(
left - window.innerWidth / 2 + width / 2,
-top + window.innerHeight / 2 - height / 2,
)
mesh.position.set(offset.x, offset.y, 0)
// 8. Add the mesh to the scene
scene.add(mesh)
imageRef.value.style.opacity = '0'
}
function onMouseMove(event: MouseEvent) {
if (!mesh)
return
gsap.to(mouse, {
duration: 0.5,
x: (event.clientX / window.innerWidth) * 2 - 1,
y: -(event.clientY / window.innerHeight) * 2 + 1,
})
gsap.to(mesh.rotation, {
duration: 0.5,
x: -mouse.y * 0.3,
y: mouse.x * (Math.PI / 6),
})
}
</script>
<template>
<div class="flex items-center justify-center">
<img
ref="imageRef"
src="https://images.unsplash.com/photo-1517841905240-472988babdf9?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=934&q=80"
alt="My image"
width="400"
>
<img
ref="hoverImageRef"
src="https://images.unsplash.com/photo-1522609925277-66fea332c575?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=934&q=80"
width="400"
style="display: none"
>
<canvas
ref="canvasContainer"
class="fixed inset-0 h-full w-full"
/>
</div>
</template>