Learning Three.js: Image Displacement-Map Warp Transitions

Recreating the image displacement-warp transition effect from the homepage of https://rahulv.dev/.

Background

In the previous post we learned how to display an image with three.js and how to use a shader to distort it.

That setup was still fairly basic, though: the canvas was fullscreen, so there was only a single view and very little interaction with the user.

This time I'll take an even simpler approach: create a canvas with exactly the same size as the image and lay it directly over it. In effect, three.js loads the image and completely replaces the original DOM image.

First, create the canvas and load the image

One thing to watch out for: the canvas must be the same size as the image, and once the loaded image fills the canvas, the camera distance still has to be set so the rendered image appears the same size as the DOM image.
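
To make that concrete, here is the distance calculation the component below relies on, as a small sketch (assuming fov = 45° and a plane height of 2 world units, the same values used in the code):

```ts
// A plane of height h exactly fills a PerspectiveCamera's view when the camera
// sits at distance d = (h / 2) / tan(fov / 2).
const fov = 45 // vertical field of view, in degrees
const planeHeight = 2 // plane height, in world units
const distance = planeHeight / (2 * Math.tan((fov * Math.PI) / 360))
// distance ≈ 2.414, the value assigned to camera.position.z below
```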

The code is almost identical to the previous post, except the canvas is no longer fixed:

```vue
<script setup lang="ts">
import gsap from 'gsap'
import { AmbientLight, Mesh, MeshBasicMaterial, PCFSoftShadowMap, PerspectiveCamera, PlaneGeometry, Scene, TextureLoader, Vector2, WebGLRenderer } from 'three'

const canvasContainer = ref<HTMLCanvasElement | null>(null)

let canvas: HTMLCanvasElement
let renderer: WebGLRenderer
let scene: Scene
let camera: PerspectiveCamera
let mesh: Mesh
const mouse = new Vector2(0, 0)

onMounted(() => {
  initThree()
  animate()

  nextTick(() => {
    createMeshToScene()
  })

  window.addEventListener('mousemove', (ev) => {
    onMouseMove(ev)
  })
})

function initThree() {
  // ===== 🖼️ Canvas, renderer & scene =====
  {
    canvas = canvasContainer.value!
    renderer = new WebGLRenderer({
      canvas,
      antialias: true,
      alpha: true,
      precision: 'highp',
    })
    renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2))
    renderer.shadowMap.enabled = true
    renderer.shadowMap.type = PCFSoftShadowMap
    scene = new Scene()

    // Size the renderer to the container
    const { width, height } = canvas.getBoundingClientRect()
    renderer.setSize(width, height, false)
  }

  // ===== 🎥 Camera =====
  {
    const aspect = window.innerWidth / window.innerHeight
    const fov = 45
    const height = 2 // frustum height at the plane: 2 world units
    const distance = height / (2 * Math.tan((fov * Math.PI) / 360))
    
    camera = new PerspectiveCamera(fov, aspect, 1, 2000)
    camera.position.z = distance // only the z distance needs setting
  }

  // ===== 💡 Lights =====
  {
    const ambientlight = new AmbientLight(0xffffff, 2)
    scene.add(ambientlight)
  }
}
function animate() {
  requestAnimationFrame(animate)

  // Clear the previous frame
  renderer.clear()
  renderer.clearDepth()

  if (resizeRendererToDisplaySize(renderer)) {
    const canvas = renderer.domElement
    camera.aspect = canvas.clientWidth / canvas.clientHeight
    camera.updateProjectionMatrix()
  }
  renderer.render(scene, camera)
}

function resizeRendererToDisplaySize(renderer: WebGLRenderer) {
  const canvas = renderer.domElement
  const width = canvas.clientWidth
  const height = canvas.clientHeight
  const needResize = canvas.width !== width || canvas.height !== height
  if (needResize) {
    renderer.setSize(width, height, false)
  }
  return needResize
}

const imageRef = ref<HTMLImageElement | null>(null)
function createMeshToScene() {
  if (!imageRef.value || !canvasContainer.value) 
    return

  const loader = new TextureLoader()
  loader.load(
    imageRef.value.src,
    (texture) => {
      texture.colorSpace = 'srgb'

      // Get the image's displayed size and aspect ratio
      const { width, height } = imageRef.value!.getBoundingClientRect()
      const aspectRatio = width / height

      // Size the geometry so it keeps the image's aspect ratio
      const planeHeight = 2 // matches the camera frustum height
      const planeWidth = planeHeight * aspectRatio
      const geometry = new PlaneGeometry(planeWidth, planeHeight)
      
      const material = new MeshBasicMaterial({
        map: texture,
      })
      
      mesh = new Mesh(geometry, material)
      scene.add(mesh)

      // Hide the DOM image only after the textured mesh is in the scene,
      // so there is no blank flash while the texture loads
      imageRef.value!.style.opacity = '0'
    },
  )
}

function onMouseMove(event: MouseEvent) {
  if (!mesh)
    return

  gsap.to(mouse, {
    duration: 0.5,
    x: (event.clientX / window.innerWidth) * 2 - 1,
    y: -(event.clientY / window.innerHeight) * 2 + 1,
  })

  gsap.to(mesh.rotation, {
    duration: 0.5,
    x: -mouse.y * 0.3,
    y: mouse.x * (Math.PI / 6),
  })
}
</script>

<template>
  <div class="w-1/2! h-auto relative">
    <img
      ref="imageRef"
      src="https://images.unsplash.com/photo-1517841905240-472988babdf9?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=934&q=80"
      alt="My image" 
      class="w-full margin-0!"
    >
    <canvas 
      ref="canvasContainer" 
      class="absolute top-0 left-0 w-full h-full object-cover"
    />
  </div>
</template>

```

Reverse-engineering the shader code

Here is the relevant code pulled out of the site:

```js
function image_setup() {
  function img_three(opts) {
    var vertex =
      "varying vec2 vUv;void main() {vUv = uv;gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );}";
    var fragment =
      "varying vec2 vUv;uniform float dispFactor;uniform float dpr;uniform sampler2D disp;uniform sampler2D texture1;uniform sampler2D texture2;uniform float angle1;uniform float angle2;uniform float intensity1;uniform float intensity2;uniform vec4 res;uniform vec2 parent;mat2 getRotM(float angle) {float s = sin(angle);float c = cos(angle);return mat2(c, -s, s, c);}void main() {vec4 disp = texture2D(disp, vUv);vec2 dispVec = vec2(disp.r, disp.g);vec2 uv = 0.5 * gl_FragCoord.xy / (res.xy) ;vec2 myUV = (uv - vec2(0.5))*res.zw + vec2(0.5);vec2 distortedPosition1 = myUV + getRotM(angle1) * dispVec * intensity1 * dispFactor;vec2 distortedPosition2 = myUV + getRotM(angle2) * dispVec * intensity2 * (1.0 - dispFactor);vec4 _texture1 = texture2D(texture1, distortedPosition1);vec4 _texture2 = texture2D(texture2, distortedPosition2);gl_FragColor = mix(_texture1, _texture2, dispFactor);}";

    function firstDefined() {
      for (var i = 0; i < arguments.length; i++) {
        if (arguments[i] !== undefined) return arguments[i];
      }
    }

    var parent = opts.parent;
    var dispImage = opts.displacementImage;
    var image = opts.image;
    var imagesRatio = firstDefined(opts.imagesRatio, 1.0);
    var intensity1 = firstDefined(opts.intensity1, opts.intensity, 1);
    var intensity2 = firstDefined(opts.intensity2, opts.intensity, 1);
    var commonAngle = firstDefined(opts.angle, Math.PI / 4); // 45 degrees by default, so grayscale images work correctly
    var angle1 = firstDefined(opts.angle1, commonAngle);
    var angle2 = firstDefined(opts.angle2, -commonAngle * 3);
    var userHover = firstDefined(opts.hover, true);
    var easing = firstDefined(opts.easing, Expo.easeOut);

    var scene = new THREE.Scene();
    var camera = new THREE.OrthographicCamera(
      parent.offsetWidth / -2,
      parent.offsetWidth / 2,
      parent.offsetHeight / 2,
      parent.offsetHeight / -2,
      1,
      1000
    );

    camera.position.z = 1;

    var renderer = new THREE.WebGLRenderer({
      antialias: false,
      alpha: true,
    });

    renderer.setPixelRatio(2.0);
    renderer.setClearColor(0xffffff, 0.0);
    renderer.setSize(parent.offsetWidth, parent.offsetHeight);
    parent.appendChild(renderer.domElement);

    var render = function () {
      renderer.render(scene, camera);
    };

    var loader = new THREE.TextureLoader();
    loader.crossOrigin = "";

    var disp = loader.load(dispImage);
    disp.magFilter = disp.minFilter = THREE.LinearFilter;

    var disp_textures = {
      tech: { img_name: "tech.jpg" },
      abstract: { img_name: "abstract.jpg" },
      bricks: { img_name: "bricks.jpg" },
      claw: { img_name: "claw.jpg" },
      cult: { img_name: "cult.jpg" },
      numbers: { img_name: "numbers.jpg" },
      pieces: { img_name: "pieces.jpg" },
      species: { img_name: "species.jpg" },
      waves: { img_name: "waves.jpg" },
    };

    $.each(disp_textures, function (key, value) {
      if (!$(".options-panel").length) return;
      disp_textures[key].texture = loader.load(
        "assets/images/effect_maps/" + this.img_name,
        function () {
          disp_textures[key].texture.magFilter = disp_textures[
            key
          ].texture.magFilter = THREE.LinearFilter;
        }
      );
    });

    var texture = loader.load(image, function () {
      texture.magFilter = texture.magFilter = THREE.LinearFilter;
      imagesRatio = texture.image.height / texture.image.width;
      core();
      render();
    });

    function core() {
      let a1, a2;
      var imageAspect = imagesRatio;

      function set_aspect() {
        if (parent.offsetHeight / parent.offsetWidth < imageAspect) {
          a1 = 1;
          a2 = parent.offsetHeight / parent.offsetWidth / imageAspect;
        } else {
          a1 = (parent.offsetWidth / parent.offsetHeight) * imageAspect;
          a2 = 1;
        }
      }
      set_aspect();

      $("#option-hover .button").click(function () {
        btn_active = $(this).data("value");
        $("#option-hover .button.active").removeClass("active");
        $(this).addClass("active");
        mat.uniforms.disp.value = disp_textures[btn_active].texture;
      });

      var mat = new THREE.ShaderMaterial({
        uniforms: {
          intensity1: {
            type: "f",
            value: intensity1,
          },
          intensity2: {
            type: "f",
            value: intensity2,
          },
          dispFactor: {
            type: "f",
            value: 0.0,
          },
          angle1: {
            type: "f",
            value: angle1,
          },
          angle2: {
            type: "f",
            value: angle2,
          },
          texture1: {
            type: "t",
            value: texture,
          },
          texture2: {
            type: "t",
            value: texture,
          },
          disp: {
            type: "t",
            value: disp,
          },
          res: {
            type: "vec4",
            value: new THREE.Vector4(
              parent.offsetWidth,
              parent.offsetHeight,
              a1,
              a2
            ),
          },
          dpr: {
            type: "f",
            value: window.devicePixelRatio,
          },
        },
        vertexShader: vertex,
        fragmentShader: fragment,
        transparent: true,
        opacity: 1.0,
      });

      document.addEventListener("keydown", function (event) {
        const key = event.key;

        if (event.key == "ArrowLeft") {
          mat.uniforms.disp.value = disp_2;
          console.log("kk");
        }
      });

      var geometry = new THREE.PlaneBufferGeometry(
        parent.offsetWidth,
        parent.offsetHeight,
        1
      );
      var object = new THREE.Mesh(geometry, mat);

      scene.add(object);

      function transitionIn() {
        gsap.to(mat.uniforms.dispFactor, {
          value: 1,
          duration: 1.6,
          ease: easing,
          overwrite: true,
          onUpdate: render,
          onComplete: render,
        });
      }

      function transitionOut() {
        gsap.to(mat.uniforms.dispFactor, {
          value: 0,
          duration: 1.2,
          ease: easing,
          overwrite: true,
          onUpdate: render,
          onComplete: render,
        });
      }

      if (userHover) {
        parent.addEventListener("mouseenter", transitionIn);
        parent.addEventListener("mouseleave", transitionOut);
        parent.addEventListener("touchstart", transitionIn);
        parent.addEventListener("touchend", transitionOut);
      }

      window.addEventListener("resize", function (e) {
        set_aspect();
        object.material.uniforms.res.value = new THREE.Vector4(
          parent.offsetWidth,
          parent.offsetHeight,
          a1,
          a2
        );
        renderer.setSize(parent.offsetWidth, parent.offsetHeight);
        render();
      });

      this.next = transitionIn;
      this.previous = transitionOut;
    }
  }

  $(".left-content .picture-box .picture").css(
    "background-image",
    "url(" + config_profile_image_url + ")"
  );

  var map_url =
    "assets/images/effect_maps/" + config_profile_image_effect + ".jpg";
  if (config_profile_image_effect == "custom")
    map_url = config_profile_image_effect_url;

  img_three({
    parent: document.querySelector(".picture-box"),
    intensity: config_profile_image_effect_intensity,
    image: config_profile_image_url,
    video: true,
    displacementImage: map_url,
  });
}

```

As you can see, the core of it is the img_three function.

Approach

1. Set up the layout

We want the canvas to replace the image directly, so wrap the img element in a parent box and put a canvas element inside the same parent, as a sibling of the image. The parent's size is determined entirely by the image, and the canvas is sized to the parent, which makes the canvas exactly the same size as the image.

2. Set up the camera

Next comes the camera. For the image on the canvas to match the DOM image exactly, getting the camera setup right is essential.

```ts
// ===== 🎥 Camera =====
{
  camera = new OrthographicCamera(
    containerRef.value!.offsetWidth / -2, // left
    containerRef.value!.offsetWidth / 2, // right
    containerRef.value!.offsetHeight / 2, // top
    containerRef.value!.offsetHeight / -2, // bottom
    1, // near clipping plane
    1000, // far clipping plane
  )
  camera.position.z = 1 // only the z distance needs setting
}

```
  1. Use an OrthographicCamera rather than a PerspectiveCamera. An orthographic camera produces no perspective: an object renders at the same size no matter how far it is from the camera, which is exactly what lets the canvas image match the DOM image one to one.
  2. The OrthographicCamera parameters: dividing the container width and height by 2 centers the orthographic frustum on the canvas (see the sketch after this list).
  3. camera.position.z = 1 just moves the camera slightly off the plane. Because the camera is orthographic, this distance does not affect the rendered image size, so the exact value hardly matters.
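
A minimal sketch of what this buys us (the 600 × 400 container size is just an illustrative assumption):

```ts
import { OrthographicCamera, PlaneGeometry } from 'three'

const width = 600 // containerRef.value.offsetWidth
const height = 400 // containerRef.value.offsetHeight

// The frustum spans exactly `width` units horizontally and `height` units
// vertically, so one world unit maps to one CSS pixel of the canvas.
const camera = new OrthographicCamera(width / -2, width / 2, height / 2, height / -2, 1, 1000)
camera.position.z = 1

// A plane with the same dimensions therefore fills the canvas edge to edge,
// regardless of camera.position.z.
const geometry = new PlaneGeometry(width, height)
```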

3. Load the image

Same idea as before: create a mesh from a geometry and a material and add it to the scene. The geometry is the concrete object that carries the image, and the material is what loads the image as a texture. With that, the image shows up on the canvas.

The geometry should be the same size as the parent box so it fills the canvas completely and no offsets need to be set. When loading the image into the material, use a ShaderMaterial so a shader can handle the distortion; by changing the uniforms we pass in, we can drive the interactive effect on the image.

Full code:

```vue
<script setup lang="ts">
import gsap from 'gsap'
import { AmbientLight, LinearFilter, Mesh, OrthographicCamera, PCFSoftShadowMap, PlaneGeometry, RepeatWrapping, Scene, ShaderMaterial, TextureLoader, Vector2, Vector4, WebGLRenderer } from 'three'

const canvasRef = ref<HTMLCanvasElement | null>(null)
const containerRef = ref<HTMLDivElement | null>(null)

let canvas: HTMLCanvasElement
let renderer: WebGLRenderer
let scene: Scene
let camera: OrthographicCamera
let mesh: Mesh

// Vertex shader
const vertexShader = `
  varying vec2 vUv;
  void main() {
    vUv = uv;
    gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
  }
`

// Fragment shader
const fragmentShader = `
  varying vec2 vUv;
  uniform float dispFactor;
  uniform float dpr;
  uniform sampler2D disp;
  uniform sampler2D texture1;
  uniform sampler2D texture2;
  uniform float angle1;
  uniform float angle2;
  uniform float intensity1;
  uniform float intensity2;
  uniform vec4 res;
  uniform vec2 parent;

  mat2 getRotM(float angle) {
    float s = sin(angle);
    float c = cos(angle);
    return mat2(c, -s, s, c);
  }

  void main() {
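    // Sample the displacement map; its red and green channels give the per-pixel offset direction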
    vec4 disp = texture2D(disp, vUv);
    vec2 dispVec = vec2(disp.r, disp.g);
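    // Rebuild normalized UVs from gl_FragCoord; the 0.5 factor assumes a device
    // pixel ratio of 2 (the original site calls setPixelRatio(2.0)), and res.zw
    // holds the aspect-ratio correction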
    vec2 uv = 0.5 * gl_FragCoord.xy / (res.xy);
    vec2 myUV = (uv - vec2(0.5)) * res.zw + vec2(0.5);
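    // Offset each texture lookup along the displacement vector, rotated by its own
    // angle: texture1 distorts more as dispFactor rises, texture2 as it falls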
    vec2 distortedPosition1 = myUV + getRotM(angle1) * dispVec * intensity1 * dispFactor;
    vec2 distortedPosition2 = myUV + getRotM(angle2) * dispVec * intensity2 * (1.0 - dispFactor);
    vec4 _texture1 = texture2D(texture1, distortedPosition1);
    vec4 _texture2 = texture2D(texture2, distortedPosition2);
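    // Cross-fade between the two distorted samples as dispFactor animates from 0 to 1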
    gl_FragColor = mix(_texture1, _texture2, dispFactor);
  }
`

onMounted(() => {
  initThree()
  animate()

  nextTick(() => {
    createMeshToScene()
  })
})

function initThree() {
  // ===== 🖼️ Canvas, renderer & scene =====
  {
    canvas = canvasRef.value!
    renderer = new WebGLRenderer({
      canvas,
      antialias: true,
      alpha: true,
      precision: 'highp',
    })
    renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2))
    renderer.shadowMap.enabled = true
    renderer.shadowMap.type = PCFSoftShadowMap
    scene = new Scene()

    // Size the renderer to the container
    renderer.setSize(
      containerRef.value!.offsetWidth,
      containerRef.value!.offsetHeight,
      false,
    )
  }

  // ===== 🎥 Camera =====
  {
    camera = new OrthographicCamera(
      containerRef.value!.offsetWidth / -2,
      containerRef.value!.offsetWidth / 2,
      containerRef.value!.offsetHeight / 2,
      containerRef.value!.offsetHeight / -2,
      1,
      1000,
    )
    camera.position.z = 1 // only the z distance needs setting
  }

  // ===== 💡 Lights =====
  {
    const ambientlight = new AmbientLight(0xffffff, 2)
    scene.add(ambientlight)
  }
}
function animate() {
  requestAnimationFrame(animate)

  // Clear the previous frame
  renderer.clear()
  renderer.clearDepth()

  if (resizeRendererToDisplaySize(renderer)) {
    camera.updateProjectionMatrix()
  }
  renderer.render(scene, camera)
}

function resizeRendererToDisplaySize(renderer: WebGLRenderer) {
  const canvas = renderer.domElement
  const width = canvas.clientWidth
  const height = canvas.clientHeight
  const needResize = canvas.width !== width || canvas.height !== height
  if (needResize) {
    renderer.setSize(width, height, false)
  }
  return needResize
}

const imageRef = ref<HTMLImageElement | null>(null)
async function createMeshToScene() {
  if (!imageRef.value || !canvasRef.value || !containerRef.value) 
    return

  const loader = new TextureLoader()
  
  // Load the original image; loadAsync returns a Promise (load does not)
  const texture1 = await loader.loadAsync(imageRef.value.src)
  texture1.colorSpace = 'srgb'
  texture1.minFilter = LinearFilter
  texture1.magFilter = LinearFilter
  
  // Load the hover-state image (the same image is reused here; swap in a different one if you like)
  const texture2 = await loader.loadAsync(imageRef.value.src)
  texture2.colorSpace = 'srgb'
  texture2.minFilter = LinearFilter
  texture2.magFilter = LinearFilter
  
  // Load the displacement map
  const dispTexture = await loader.loadAsync('/image.png') // replace with the path to your displacement map
  dispTexture.wrapS = dispTexture.wrapT = RepeatWrapping // let the texture repeat
  dispTexture.minFilter = LinearFilter
  dispTexture.magFilter = LinearFilter
  dispTexture.repeat.set(1, 1) // repeat count

  // Create the geometry, sized in pixels to match the container
  const geometry = new PlaneGeometry(
    containerRef.value.offsetWidth,
    containerRef.value.offsetHeight,
    1,
  )
  
  // Create the shader material
  const material = new ShaderMaterial({
    uniforms: {
      texture1: { value: texture1 },
      texture2: { value: texture2 },
      disp: { value: dispTexture },
      dispFactor: { value: 0.0 },
      dpr: { value: window.devicePixelRatio },
      angle1: { value: Math.PI / 4 }, // 45 degrees
      angle2: { value: -Math.PI / 4 }, // -45 degrees
      intensity1: { value: 0.3 },
      intensity2: { value: 0.3 },
      res: { value: new Vector4(
        containerRef.value.offsetWidth,
        containerRef.value.offsetHeight,
        1,
        1,
      ) },
      parent: { value: new Vector2(1, 1) },
    },
    vertexShader,
    fragmentShader,
    transparent: true,
  })
  
  mesh = new Mesh(geometry, material)
  scene.add(mesh)
  
  imageRef.value.style.opacity = '0'
}

// Transition functions
function transitionIn() {
  if (!mesh) 
    return
  
  const material = mesh.material as ShaderMaterial
  
  gsap.to(material.uniforms.dispFactor as any, {
    value: 1.0,
    duration: 1.6,
    ease: 'expo.inOut',
    overwrite: true,
  })
}

function transitionOut() {
  if (!mesh) 
    return
  
  const material = mesh.material as ShaderMaterial
  
  gsap.to(material.uniforms.dispFactor as any, {
    value: 0.0,
    duration: 1.2,
    ease: 'expo.inOut',
    overwrite: true,
  })
}

onUnmounted(() => {
  if (mesh) {
    scene.remove(mesh)
    mesh.geometry.dispose()
    if (mesh.material instanceof ShaderMaterial) {
      mesh.material.dispose()
    }
  }
  renderer.dispose()
})
</script>

<template>
  <div 
    ref="containerRef"
    class="w-1/2! h-auto relative"
    @mouseenter="transitionIn"
    @mouseleave="transitionOut"
    @touchstart="transitionIn"
    @touchend="transitionOut"
  >
    <img
      ref="imageRef"
      src="https://images.unsplash.com/photo-1517841905240-472988babdf9?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=934&q=80"
      alt="My image" 
      class="w-full margin-0!"
    >
    <canvas 
      ref="canvasRef" 
      class="absolute top-0 left-0 w-full h-full object-cover"
    />
  </div>
</template>

```

TODO

Dig into the shader code next and work out exactly what it is doing.