
I am trying to run this HTML example https://codepen.io/mediapipe/details/KKgVaPJ (from https://google.github.io/mediapipe/solutions/face_mesh#javascript-solution-api) inside a create-react-app project. So far I have:

  • npm installed all the MediaPipe face mesh packages (roughly the setup sketched right after this list).
  • Replaced the jsdelivr script tags with node imports, and I do get the constants and functions from them.
  • Replaced the video element with react-webcam.
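
Roughly, that setup amounts to something like the following (just a sketch; the exact package list is an assumption based on the imports in App.js below):

npm install react-webcam @mediapipe/face_mesh @mediapipe/camera_utils @mediapipe/drawing_utils

// and, in place of the jsdelivr <script> tags:
import { FaceMesh, FACEMESH_TESSELATION } from '@mediapipe/face_mesh';
import { Camera } from '@mediapipe/camera_utils';
import { drawConnectors } from '@mediapipe/drawing_utils';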

What I don't know is how to replace this jsdelivr part, which may be what is breaking things:

const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
  });

So the question is:

  • Why doesn't the face mesh show up? Is there an example of what I'm trying to do?

Here is my App.js code (sorry for the debugging scaffolding):

import './App.css';
import React, { useState, useEffect } from "react";
import Webcam from "react-webcam";
import { Camera, CameraOptions } from '@mediapipe/camera_utils'
import {
  FaceMesh,
  FACEMESH_TESSELATION,
  FACEMESH_RIGHT_EYE,
  FACEMESH_LEFT_EYE,
  FACEMESH_RIGHT_EYEBROW,
  FACEMESH_LEFT_EYEBROW,
  FACEMESH_FACE_OVAL,
  FACEMESH_LIPS
} from '@mediapipe/face_mesh'
import { drawConnectors } from '@mediapipe/drawing_utils'

const videoConstraints = {
  width: 1280,
  height: 720,
  facingMode: "user"
};

function App() {
  const webcamRef = React.useRef(null);
  const canvasReference = React.useRef(null);
  const [cameraReady, setCameraReady] = useState(false);
  let canvasCtx
  let camera

  const videoElement = document.getElementsByClassName('input_video')[0];
  // const canvasElement = document.getElementsByClassName('output_canvas')[0];

  const canvasElement = document.createElement('canvas');

  console.log('canvasElement', canvasElement)
  console.log('canvasCtx', canvasCtx)

  useEffect(() => {
    camera = new Camera(webcamRef.current, {
      onFrame: async () => {
        console.log('{send}',await faceMesh.send({ image: webcamRef.current.video }));
      },
      width: 1280,
      height: 720
    });

    canvasCtx = canvasReference.current.getContext('2d');
    camera.start();
    console.log('canvasReference', canvasReference)

  }, [cameraReady]);

  function onResults(results) {
    console.log('results')
    canvasCtx.save();
    canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
    canvasCtx.drawImage(
      results.image, 0, 0, canvasElement.width, canvasElement.height);
    if (results.multiFaceLandmarks) {
      for (const landmarks of results.multiFaceLandmarks) {
        drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYEBROW, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYEBROW, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
      }
    }
    canvasCtx.restore();
  }

  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
  });
  faceMesh.setOptions({
    selfieMode: true,
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5
  });
  faceMesh.onResults(onResults);

  // const camera = new Camera(webcamRef.current, {
  //   onFrame: async () => {
  //     await faceMesh.send({ image: videoElement });
  //   },
  //   width: 1280,
  //   height: 720
  // });
  // camera.start();

  return (
    <div className="App">
      <Webcam
        audio={false}
        height={720}
        ref={webcamRef}
        screenshotFormat="image/jpeg"
        width={1280}
        videoConstraints={videoConstraints}
        onUserMedia={() => {
          console.log('webcamRef.current', webcamRef.current);
          // navigator.mediaDevices
          //   .getUserMedia({ video: true })
          //   .then(stream => webcamRef.current.srcObject = stream)
          //   .catch(console.log);

          setCameraReady(true)
        }}
      />
      <canvas
        ref={canvasReference}
        style={{
          position: "absolute",
          marginLeft: "auto",
          marginRight: "auto",
          left: 0,
          right: 0,
          textAlign: "center",
          zindex: 9,
          width: 1280,
          height: 720,
        }}
      />

    </div >
  );
}

export default App;

1 Answer


You don't have to replace the jsdelivr URLs; that code is fine (there is a short sketch at the end of this answer in case you ever do want to serve those files locally). I also think you need to reorder your code:

  • You should put the faceMesh initialization inside a useEffect with [] as the dependency array, so the pipeline is set up once, when the page first renders.
  • Also, you don't need to grab videoElement and canvasElement with document.* lookups, because you have already defined refs for them (see the short sketch after the code example).

Code example:

useEffect(() => {
  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    },
  });

  faceMesh.setOptions({
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5,
  });

  faceMesh.onResults(onResults);

  if (
    typeof webcamRef.current !== "undefined" &&
    webcamRef.current !== null
  ) {
    camera = new Camera(webcamRef.current.video, {
      onFrame: async () => {
        await faceMesh.send({ image: webcamRef.current.video });
      },
      width: 1280,
      height: 720,
    });
    camera.start();
  }
}, []);
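
For the second bullet, a minimal sketch of what reading the elements through the existing refs could look like, instead of document.getElementsByClassName / document.createElement (assuming the same webcamRef and canvasReference from your component):

// react-webcam exposes the underlying <video> element on the ref
const videoElement = webcamRef.current.video;
// the canvas ref already points at the rendered <canvas> element
const canvasElement = canvasReference.current;
const canvasCtx = canvasElement.getContext('2d');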

Finally, in the onResults callback I suggest logging the results first, to check that the MediaPipe pipeline is actually producing output. And don't forget to set the canvas size before drawing: the width/height in the style prop only scales the canvas with CSS, while the drawing buffer size comes from the element's width/height attributes.

function onResults(results) {
   console.log(results)
   canvasCtx = canvasReference.current.getContext('2d')
   canvasReference.current.width = webcamRef.current.video.videoWidth;
   canvasReference.current.height = webcamRef.current.video.videoHeight;

   ...
}
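
One more note on jsdelivr: keeping the CDN in locateFile is fine, since it only tells the solution where to download its .wasm/.data/model assets at runtime. If you ever do want to serve those files yourself, a minimal sketch, assuming you copy them from node_modules/@mediapipe/face_mesh into a public/face_mesh folder of your CRA project (the folder name is just an example):

const faceMesh = new FaceMesh({
  // hypothetical local setup: assets copied from node_modules/@mediapipe/face_mesh into public/face_mesh
  locateFile: (file) => `${process.env.PUBLIC_URL}/face_mesh/${file}`,
});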

Good luck! :)

answered 2021-06-07T14:59:51.130