
I'm trying to implement GPU picking with Points, using code I adapted from the second half of this article: https://threejsfundamentals.org/threejs/lessons/threejs-picking.html

It has been working fine for me on desktop, but when I started testing other browsers and devices it doesn't work consistently. I made a Codepen to illustrate: https://codepen.io/deklanw/pen/OJVVmEd?editors=1111

body {
  margin: 0;
}
#c {
  width: 100vw;
  height: 100vh;
  display: block;
}
<canvas id="c"></canvas>
<script type="module">
// Three.js - Picking - RayCaster w/Transparency
// from https://threejsfundamentals.org/threejs/threejs-picking-gpu.html

import * as THREE from "https://threejsfundamentals.org/threejs/resources/threejs/r113/build/three.module.js";

function main() {
  const canvas = document.querySelector("#c");
  const renderer = new THREE.WebGLRenderer({ canvas });

  const fov = 60;
  const aspect = 2; // the canvas default
  const near = 0.1;
  const far = 200;
  const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
  camera.position.z = 30;

  const scene = new THREE.Scene();
  scene.background = new THREE.Color(0);
  const pickingScene = new THREE.Scene();
  pickingScene.background = new THREE.Color(0);

  // put the camera on a pole (parent it to an object)
  // so we can spin the pole to move the camera around the scene
  const cameraPole = new THREE.Object3D();
  scene.add(cameraPole);
  cameraPole.add(camera);

  function randomNormalizedColor() {
    return Math.random();
  }

  function getRandomInt(n) {
    return Math.floor(Math.random() * n);
  }

  function getCanvasRelativePosition(e) {
    const rect = canvas.getBoundingClientRect();
    return {
      x: e.clientX - rect.left,
      y: e.clientY - rect.top
    };
  }

  const textureLoader = new THREE.TextureLoader();
  const particleTexture =
    "https://raw.githubusercontent.com/mrdoob/three.js/master/examples/textures/sprites/ball.png";

  const vertexShader = `
    attribute float size;
    attribute vec3 customColor;

    varying vec3 vColor;

    void main() {
        vColor = customColor;
        vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
        gl_PointSize = size * ( 100.0 / length( mvPosition.xyz ) );
        gl_Position = projectionMatrix * mvPosition;
    }
`;

  const fragmentShader = `
    uniform sampler2D texture;
    varying vec3 vColor;

    void main() {
        vec4 tColor = texture2D( texture, gl_PointCoord );
        if (tColor.a < 0.5) discard;
        gl_FragColor = mix( vec4( vColor.rgb, 1.0 ), tColor, 0.1 );
    }
`;

  const pickFragmentShader = `
    uniform sampler2D texture;
    varying vec3 vColor;

    void main() {
      vec4 tColor = texture2D( texture, gl_PointCoord );
      if (tColor.a < 0.25) discard;
      gl_FragColor = vec4( vColor.rgb, 1.0);
    }
`;

  const materialSettings = {
    uniforms: {
      texture: {
        type: "t",
        value: textureLoader.load(particleTexture)
      }
    },
    vertexShader: vertexShader,
    fragmentShader: fragmentShader,
    blending: THREE.NormalBlending,
    depthTest: true,
    transparent: false
  };

  const createParticleMaterial = () => {
    const material = new THREE.ShaderMaterial(materialSettings);
    return material;
  };

  const createPickingMaterial = () => {
    const material = new THREE.ShaderMaterial({
      ...materialSettings,
      fragmentShader: pickFragmentShader,
      blending: THREE.NormalBlending
    });
    return material;
  };

  const geometry = new THREE.BufferGeometry();
  const pickingGeometry = new THREE.BufferGeometry();
  const colors = [];
  const sizes = [];
  const pickingColors = [];
  const pickingColor = new THREE.Color();
  const positions = [];

  for (let i = 0; i < 30; i++) {
    colors[3 * i] = randomNormalizedColor();
    colors[3 * i + 1] = randomNormalizedColor();
    colors[3 * i + 2] = randomNormalizedColor();

    // encode (i + 1) into an RGB picking color so that id 0 can mean "background / nothing picked"
    const rgbPickingColor = pickingColor.setHex(i + 1);
    pickingColors[3 * i] = rgbPickingColor.r;
    pickingColors[3 * i + 1] = rgbPickingColor.g;
    pickingColors[3 * i + 2] = rgbPickingColor.b;

    sizes[i] = getRandomInt(20);

    positions[3 * i] = getRandomInt(20);
    positions[3 * i + 1] = getRandomInt(20);
    positions[3 * i + 2] = getRandomInt(20);
  }

  geometry.setAttribute(
    "position",
    new THREE.Float32BufferAttribute(positions, 3)
  );
  geometry.setAttribute(
    "customColor",
    new THREE.Float32BufferAttribute(colors, 3)
  );
  geometry.setAttribute("size", new THREE.Float32BufferAttribute(sizes, 1));

  geometry.computeBoundingBox();

  const material = createParticleMaterial();
  const points = new THREE.Points(geometry, material);

  // setup geometry and material for GPU picking
  pickingGeometry.setAttribute(
    "position",
    new THREE.Float32BufferAttribute(positions, 3)
  );
  pickingGeometry.setAttribute(
    "customColor",
    new THREE.Float32BufferAttribute(pickingColors, 3)
  );
  pickingGeometry.setAttribute(
    "size",
    new THREE.Float32BufferAttribute(sizes, 1)
  );

  pickingGeometry.computeBoundingBox();

  const pickingMaterial = createPickingMaterial();
  const pickingPoints = new THREE.Points(pickingGeometry, pickingMaterial);

  scene.add(points);
  pickingScene.add(pickingPoints);

  function resizeRendererToDisplaySize(renderer) {
    const canvas = renderer.domElement;
    const width = canvas.clientWidth;
    const height = canvas.clientHeight;
    const needResize = canvas.width !== width || canvas.height !== height;
    if (needResize) {
      renderer.setSize(width, height, false);
    }
    return needResize;
  }

  class GPUPickHelper {
    constructor() {
      // create a 1x1 pixel render target
      this.pickingTexture = new THREE.WebGLRenderTarget(1, 1);
      this.pixelBuffer = new Uint8Array(4);
    }
    pick(cssPosition, pickingScene, camera) {
      const { pickingTexture, pixelBuffer } = this;

      // set the view offset to represent just a single pixel under the mouse
      const pixelRatio = renderer.getPixelRatio();
      camera.setViewOffset(
        renderer.getContext().drawingBufferWidth, // full width
        renderer.getContext().drawingBufferHeight, // full height
        (cssPosition.x * pixelRatio) | 0, // rect x
        (cssPosition.y * pixelRatio) | 0, // rect y
        1, // rect width
        1 // rect height
      );
      // render the scene
      renderer.setRenderTarget(pickingTexture);
      renderer.render(pickingScene, camera);
      renderer.setRenderTarget(null);
      // clear the view offset so rendering returns to normal
      camera.clearViewOffset();
      //read the pixel
      renderer.readRenderTargetPixels(
        pickingTexture,
        0, // x
        0, // y
        1, // width
        1, // height
        pixelBuffer
      );

      // decode the picked id from the RGB bytes; 0 means the (black) background was hit
      const id =
        (pixelBuffer[0] << 16) | (pixelBuffer[1] << 8) | pixelBuffer[2];
      
      console.log(`You clicked point number ${id}`);
      
      return id;
    }
  }

  const pickHelper = new GPUPickHelper();

  function render(time) {
    time *= 0.001; // convert to seconds

    if (resizeRendererToDisplaySize(renderer)) {
      const canvas = renderer.domElement;
      camera.aspect = canvas.clientWidth / canvas.clientHeight;
      camera.updateProjectionMatrix();
    }

    cameraPole.rotation.y = time * 0.1;

    renderer.render(scene, camera);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);

  function onClick(e) {
    const pickPosition = getCanvasRelativePosition(e);
    const pickedID = pickHelper.pick(pickPosition, pickingScene, camera);
  }

  function onTouch(e) {
    const touch = e.touches[0];
    const pickPosition = getCanvasRelativePosition(touch);
    const pickedID = pickHelper.pick(pickPosition, pickingScene, camera);
  }

  window.addEventListener("mousedown", onClick);
  window.addEventListener("touchstart", onTouch);
}

main();
</script>

If you click (or tap) the nodes, their IDs should pop up in the console. On some devices I only get 0, as if the background had been picked.

Does anyone know why?

Also, if there is a simpler approach that would still work in this case (a Points mesh with variably sized points via a ShaderMaterial), I'm curious what it would be.

Edit:

I removed the 1x1 render target optimization and that seems to have fixed it. Now I'm wondering what problem that optimization could be causing.
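For reference, the change amounts to rendering the picking scene into a render target the same size as the drawing buffer and reading back just the pixel under the cursor, instead of using camera.setViewOffset with a 1x1 target. This is only a sketch of the idea, not the exact code from my pen (the y-flip on readback is needed because render-target pixels are y-up):

```js
class GPUPickHelper {
  constructor() {
    // placeholder size; resized to the drawing buffer on every pick
    this.pickingTexture = new THREE.WebGLRenderTarget(1, 1);
    this.pixelBuffer = new Uint8Array(4);
  }
  pick(cssPosition, pickingScene, camera) {
    const { pickingTexture, pixelBuffer } = this;
    const pixelRatio = renderer.getPixelRatio();
    const ctx = renderer.getContext();

    // keep the picking target the same size as the drawing buffer
    pickingTexture.setSize(ctx.drawingBufferWidth, ctx.drawingBufferHeight);

    // render the whole picking scene (no camera.setViewOffset)
    renderer.setRenderTarget(pickingTexture);
    renderer.render(pickingScene, camera);
    renderer.setRenderTarget(null);

    // read the single pixel under the cursor, flipping y
    renderer.readRenderTargetPixels(
      pickingTexture,
      (cssPosition.x * pixelRatio) | 0,
      ctx.drawingBufferHeight - ((cssPosition.y * pixelRatio) | 0) - 1,
      1,
      1,
      pixelBuffer
    );

    return (pixelBuffer[0] << 16) | (pixelBuffer[1] << 8) | pixelBuffer[2];
  }
}
```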


1 Answer


The problem is that you can't use points this way across devices.

Whether a point is drawn when its center is offscreen is device dependent (the OpenGL ES / WebGL spec says it should still be drawn, the OpenGL spec says it shouldn't; there are no conformance tests for it, so every driver behaves differently), and it's too much work for the WebGL implementations to work around, so they don't. AFAIK Intel and NVidia do draw them; AMD and PowerVR-based GPUs (iPhone) do not.

You can see the issue if you make the circles larger and make sure they go offscreen (you may also need to make the canvas smaller). On some devices they will slide offscreen smoothly; on others they will disappear the moment their center goes offscreen (it often also depends on the size of the point and the size of the viewport).

This means your example doesn't really work in either case, with or without the 1x1-pixel render target. It's just that with the 1x1-pixel render target nearly every circle's center is outside that 1x1-pixel area, so on some devices none of them get drawn. When you make the render target match the size of the canvas, most of the circles' centers are inside it, but you will still get picking errors at the edges.

To fix it you need to draw your points with quads instead of POINTS. There are many ways to do that: draw each quad as a separate mesh or sprite, merge all the quads into one mesh, use an InstancedMesh where you need one matrix per point, or write custom shaders to do points (see the last example in that article). A rough sketch of the InstancedMesh idea is below.
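For example, here's a minimal sketch of the InstancedMesh approach, reusing the `positions`, `sizes`, `textureLoader`, `particleTexture`, `scene` and `camera` variables from the question; the other names are just illustrative, and point sizes are in pixels while these quads are in world units, so the scale factor would need adjusting:

```js
// sketch: one camera-facing quad per point via InstancedMesh
const numPoints = 30;
const quadGeometry = new THREE.PlaneBufferGeometry(1, 1);
const quadMaterial = new THREE.MeshBasicMaterial({
  map: textureLoader.load(particleTexture),
  alphaTest: 0.5
});
const instancedQuads = new THREE.InstancedMesh(quadGeometry, quadMaterial, numPoints);

const dummy = new THREE.Object3D();
for (let i = 0; i < numPoints; ++i) {
  dummy.position.set(positions[3 * i], positions[3 * i + 1], positions[3 * i + 2]);
  dummy.quaternion.copy(camera.quaternion); // billboard toward the camera
  dummy.scale.setScalar(sizes[i]);          // world units, unlike pixel-sized points
  dummy.updateMatrix();
  instancedQuads.setMatrixAt(i, dummy.matrix);
}
instancedQuads.instanceMatrix.needsUpdate = true;
scene.add(instancedQuads);
// note: if the camera moves (as it does on the camera pole), you'd refresh these
// matrices each frame, or billboard in a custom shader instead
```

For the picking pass you'd build a second InstancedMesh with the same matrices and a material that outputs a flat per-instance id color (for example an InstancedBufferAttribute consumed by a small custom shader). The edge errors go away because quads get clipped like any other triangles.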

Note that points have other issues as well. By default they don't scale relative to the canvas size (you can of course fix that in your shader, and three.js also has an option for it). They also have a device-dependent maximum size, which per the spec can be as low as 1 pixel. And they don't respond well to devicePixelRatio settings (though you can fix that in your code too). For all these reasons points have limited uses, and the large circles this code draws arguably go past that limit.
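As one example of the canvas-size fix mentioned above, the question's vertex shader could take the drawing-buffer height as a uniform instead of the hard-coded 100.0. The `canvasHeight` uniform here is an assumption you'd add to the ShaderMaterial yourself, not something three.js provides automatically:

```js
// sketch: scale gl_PointSize with the canvas height so points keep their
// apparent size when the canvas is resized
const sizeAwareVertexShader = `
  attribute float size;
  attribute vec3 customColor;
  uniform float canvasHeight;

  varying vec3 vColor;

  void main() {
      vColor = customColor;
      vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
      gl_PointSize = size * ( canvasHeight * 0.5 ) / length( mvPosition.xyz );
      gl_Position = projectionMatrix * mvPosition;
  }
`;

// update the custom uniform whenever the renderer is resized
material.uniforms.canvasHeight.value = renderer.getContext().drawingBufferHeight;
```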

answered 2020-02-19T18:05:40.873