As far as I know, the official Kinect 1.5 SDK comes with face tracking and skeletal tracking. What about simple blob detection? All I want to do is track a circular/elliptical object. I can't find anything for this in the SDK, so should I use OpenCV or another library?
(My code is in C++.)
EDIT1: Could the face tracker be adapted so that it detects a generic circular shape rather than a face?
EDIT2: Below is the depth-processing code from the sample that ships with the SDK. How can I get OpenCV to extract blobs from it?
void CDepthBasics::ProcessDepth()
{
    HRESULT hr;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return;
    }

    INuiFrameTexture * pTexture = imageFrame.pFrameTexture;
    NUI_LOCKED_RECT LockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Make sure we've received valid data
    if (LockedRect.Pitch != 0)
    {
        BYTE * rgbrun = m_depthRGBX;
        const USHORT * pBufferRun = (const USHORT *)LockedRect.pBits;

        // end pixel is start + width*height - 1
        const USHORT * pBufferEnd = pBufferRun + (cDepthWidth * cDepthHeight);

        while ( pBufferRun < pBufferEnd )
        {
            // discard the portion of the depth that contains only the player index
            USHORT depth = NuiDepthPixelToDepth(*pBufferRun);

            // to convert to a byte we're looking at only the lower 8 bits
            // by discarding the most significant rather than least significant data
            // we're preserving detail, although the intensity will "wrap"
            BYTE intensity = static_cast<BYTE>(depth % 256);

            // Write out blue byte
            *(rgbrun++) = intensity;

            // Write out green byte
            *(rgbrun++) = intensity;

            // Write out red byte
            *(rgbrun++) = intensity;

            // We're outputting BGR, the last byte in the 32 bits is unused so skip it
            // If we were outputting BGRA, we would write alpha here.
            ++rgbrun;

            // Increment our index into the Kinect's depth buffer
            ++pBufferRun;
        }

        // Draw the data with Direct2D
        m_pDrawDepth->Draw(m_depthRGBX, cDepthWidth * cDepthHeight * cBytesPerPixel);
    }

    // We're done with the texture so unlock it
    pTexture->UnlockRect(0);

    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);
}
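Not from the SDK itself, but here is a minimal sketch of how the m_depthRGBX buffer filled above could be handed to OpenCV for blob/circle detection. It assumes OpenCV 2.x; the function name DetectBlobsInDepth, the detector parameters, and the threshold values are all placeholders to tune for your object, not part of the SDK sample.

// Minimal OpenCV 2.x sketch (not part of the SDK sample). Wraps the BGRX
// buffer produced by ProcessDepth() in a cv::Mat header and looks for
// circular blobs. All numeric parameters are guesses to be tuned.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

void DetectBlobsInDepth(unsigned char* depthRGBX, int width, int height)
{
    // Wrap the existing BGRX buffer in a Mat header -- no pixel data is copied.
    cv::Mat bgrx(height, width, CV_8UC4, depthRGBX);

    // Collapse to a single-channel intensity image for the detectors.
    cv::Mat gray;
    cv::cvtColor(bgrx, gray, CV_BGRA2GRAY);

    // Smooth a little so depth noise doesn't produce spurious circles.
    cv::GaussianBlur(gray, gray, cv::Size(9, 9), 2.0);

    // Option 1: Hough transform for circular outlines.
    std::vector<cv::Vec3f> circles;                  // (x, y, radius) per hit
    cv::HoughCircles(gray, circles, CV_HOUGH_GRADIENT,
                     2,              // inverse accumulator resolution
                     gray.rows / 8,  // minimum distance between centres
                     100, 50,        // Canny / accumulator thresholds (tune)
                     10, 200);       // min / max radius in pixels (tune)

    // Option 2: generic blob detection, filtered to round-ish shapes.
    cv::SimpleBlobDetector::Params params;
    params.filterByCircularity = true;
    params.minCircularity = 0.7f;
    cv::SimpleBlobDetector detector(params);
    std::vector<cv::KeyPoint> blobs;
    detector.detect(gray, blobs);

    // 'circles' and 'blobs' now hold candidate centres; use whichever
    // detector works better for your object.
}

You would call this right after the while-loop, e.g. DetectBlobsInDepth(m_depthRGBX, cDepthWidth, cDepthHeight). An alternative, also untested, is to wrap the raw USHORT depth buffer directly as a CV_16UC1 Mat and threshold by distance before detecting, which avoids the intensity "wrap" from the 8-bit conversion in the sample.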