0

试图让BlazePose TFJS在 Angular 12 中工作。我有一个空项目并安装了所需的包(我认为)。Package.json 看起来像这样:

{
  "name": "posetrackingtest",
  "version": "0.0.0",
  "scripts": {
    "ng": "ng",
    "start": "ng serve",
    "build": "ng build",
    "watch": "ng build --watch --configuration development",
    "test": "ng test"
  },
  "private": true,
  "dependencies": {
    "@angular/animations": "~12.0.3",
    "@angular/common": "~12.0.3",
    "@angular/compiler": "~12.0.3",
    "@angular/core": "~12.0.3",
    "@angular/forms": "~12.0.3",
    "@angular/platform-browser": "~12.0.3",
    "@angular/platform-browser-dynamic": "~12.0.3",
    "@angular/router": "~12.0.3",
    "@mediapipe/pose": "^0.3.1621277220",
    "@tensorflow-models/pose-detection": "^0.0.3",
    "@tensorflow/tfjs-backend-webgl": "^3.7.0",
    "@tensorflow/tfjs-converter": "^3.7.0",
    "@tensorflow/tfjs-core": "^3.7.0",
    "rxjs": "~6.6.0",
    "tslib": "^2.1.0",
    "zone.js": "~0.11.4"
  },
  "devDependencies": {
    "@angular-devkit/build-angular": "~12.0.3",
    "@angular/cli": "~12.0.3",
    "@angular/compiler-cli": "~12.0.3",
    "@types/jasmine": "~3.6.0",
    "@types/node": "^12.11.1",
    "jasmine-core": "~3.7.0",
    "karma": "~6.3.0",
    "karma-chrome-launcher": "~3.1.0",
    "karma-coverage": "~2.0.3",
    "karma-jasmine": "~4.0.0",
    "karma-jasmine-html-reporter": "^1.5.0",
    "typescript": "~4.2.3"
  }
}

我有一个带有以下 HTML 的组件:

<video
    #videoplayer
    id="videoplayer"
    autoplay>
</video>

我的组件打字稿代码是:

import { AfterViewInit, Component, ElementRef, OnInit, ViewChild } from '@angular/core';

import '@tensorflow/tfjs-backend-webgl';
import * as poseDetection from '@tensorflow-models/pose-detection';


@Component({
  selector: 'app-pose',
  templateUrl: './pose.component.html',
  styleUrls: ['./pose.component.css']
})
export class PoseComponent implements OnInit, AfterViewInit {

  @ViewChild("videoplayer", { static: false }) videoplayer: ElementRef;

  public detector: any;
  public poses: any;
  public error: string;

  constructor() { }

  ngOnInit(): void {}


  ngAfterViewInit () : void{
    this.init();
  }

  async init() {

    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
      try {
        const stream = await navigator.mediaDevices.getUserMedia({
          video: true
        });
        if (stream) {
          console.log(stream);
          this.videoplayer.nativeElement.srcObject = stream;

          console.log(this.videoplayer.nativeElement);

          console.log("About to load detector");
          let detectorConfig = {
            runtime: 'tfjs',
            enableSmoothing: true,
            modelType: 'full'
          };
          this.detector = await poseDetection.createDetector(poseDetection.SupportedModels.BlazePose, detectorConfig);
          console.log(this.detector);
          console.log("Detector loaded");

          let poses = await this.detector.estimatePoses(this.videoplayer.nativeElement);

          console.log(poses);

          this.error = null;
        } else {
          this.error = "You have no output video device";
        }
      } catch (e) {
        this.error = e;
      }
    }

  }


}

我没有收到任何错误,当我执行它时可以通过 HTML 页面上的网络摄像头看到自己,但输出console.log(poses);只是一个空列表[]。即没有姿势数据。

另外,如何让let poses = await this.detector.estimatePoses(this.videoplayer.nativeElement);线路不断执行?this.poses变量是不断更新还是我需要以某种方式迭代?

请问我做错了什么?谢谢。

4

1 回答 1

0

我遇到了一个相对相似的问题(尽管我使用的是纯 JavaScript,但 Blazepose 输出为空)。为了解决这个问题,我按照谷歌在他们的 camera.js 文件中所做的设置我的相机(https://github.com/tensorflow/tfjs-models/blob/master/pose-detection/demos/live_video/src/camera.js)。

const videoElement = document.getElementsByClassName('input_video')[0];

if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
      throw new Error(
          'Browser API navigator.mediaDevices.getUserMedia not available');
    }
    const videoConfig = {
      'audio': false,
      'video': {
        facingMode: 'user',
        width: 320,
        height: 240,
        frameRate: {
          ideal: 60,
        }
      }
    };

    const stream = await navigator.mediaDevices.getUserMedia(videoConfig);

    videoElement.srcObject = stream;

    await new Promise((resolve) => {
      videoElement.onloadedmetadata = () => {
        resolve(videoElement);
      };
    });

    videoElement.play();
    resizeCanvasToDisplaySize(canvasElement)

    const videoWidth = videoElement.videoWidth;
    const videoHeight = videoElement.videoHeight;
    // Must set below two lines, otherwise video element doesn't show.
    videoElement.width = videoWidth;
    videoElement.height = videoHeight;
    
}

为了回答关于让线路不断执行的部分,我使用了 requestAnimationFrame

async function updateVideo(){
    const poses = await detector.estimatePoses(videoElement,    estimationConfig, timestamp);
    await updateScreen(poses)
    //console.log(poses)
    window.requestAnimationFrame(updateVideo);
}

然后我们可以在视频加载后开始这个循环

videoElement.onloadeddata = async function() {
        updateVideo()
}

这也可以使用 setInterval 完成,但是最好请求动画帧,因为如果您最终落后于当前帧,它不会导致积压,它会向前跳到当前帧。

于 2021-07-24T00:41:59.113 回答