I am building a web application that shows the user's body on screen and marks keypoints (eyes, nose, mouth, shoulders, etc.) using the PoseNet model. However, after I feed it video frames, it does not return the correct x, y coordinates for each body part.
index.html:
<html>
  <body>
    <!-- Load TensorFlow.js -->
    <script src="https://unpkg.com/@tensorflow/tfjs"></script>
    <!-- Load PoseNet -->
    <script src="https://unpkg.com/@tensorflow-models/posenet"></script>
    <video id="video" width=50% autoplay="true"></video>
    <canvas id="output"></canvas>
    <script src="index.js" type="module"></script>
  </body>
</html>
The screen should show the webcam video and a small red square at the position where the nose is. When the user's nose moves, the red square should move by the same distance. That is not what happens.
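For reference, here is a minimal sketch (not my actual code) of how I expect to read the nose keypoint, assuming each keypoint has the shape documented for @tensorflow-models/posenet, i.e. { score, part, position: { x, y } }; detectPose is the helper defined in index.js below:

// Sketch only: look up the nose keypoint by name and log its position,
// assuming the documented keypoint shape { score, part, position: { x, y } }.
detectPose().then(pose => {
  const nose = pose.keypoints.find(k => k.part === "nose");
  console.log(nose.position.x, nose.position.y); // these x, y values look wrong to me
});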
index.js:
const scaleFactor = 0.50;
const flipHorizontal = false;
const outputStride = 16;

console.log("loading model...");
const net = await posenet.load({
  architecture: "MobileNetV1",
});
console.log("...model loaded!");

var video = document.querySelector("#video");
var canvas = document.getElementById("output");
var ctx = canvas.getContext("2d");

// Request webcam access and attach the stream to the <video> element.
function setupVideo() {
  if (navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ video: true })
      .then(function (stream) {
        video.srcObject = stream;
      })
      .catch(function (err) {
        console.log("Something went wrong!", err);
      });
  }
}

// Run single-pose estimation on the current video frame.
async function detectPose() {
  let pose = await net.estimateSinglePose(video, scaleFactor, flipHorizontal, outputStride);
  return pose;
}

setupVideo();
canvas.height = 600;
canvas.width = 500;

// Clear the canvas and draw a 10x10 red square at the detected nose position.
function drawpose() {
  detectPose().then(pose => {
    ctx.fillStyle = "#FFFFFF";
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    ctx.fillStyle = "#FF0000";
    console.log(pose);
    let nose = pose["keypoints"][0];
    console.log(nose.position);
    let i;
    // for (i = 0; i < pose.keypoints.length; i++)  // draw every keypoint
    for (i = 0; i < 1; i++) {                       // for now, only the nose (keypoint 0)
      ctx.fillRect(pose.keypoints[i].position.x, pose.keypoints[i].position.y, 10, 10);
    }
  });
}

// Start the detection loop once the webcam stream has loaded.
video.addEventListener('loadeddata', (e) => {
  console.log("loaded video!");
  video.setAttribute("videoHeight", video.videoWidth);
  setInterval(drawpose, 500);
});
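I am not sure whether the positional-argument call to estimateSinglePose above is even correct for the version that unpkg serves; the @tensorflow-models/posenet 2.x README describes a single config-object form instead. A sketch of that form, in case it matters (I have not confirmed which signature my build actually expects):

// Sketch of the config-object call from the posenet 2.x README;
// I have not verified which signature the unpkg build uses.
async function detectPoseV2() {
  const pose = await net.estimateSinglePose(video, { flipHorizontal: false });
  return pose;
}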