I'm new to Kinect development, and new to Stack Overflow as well! Here is my situation:
With the depth camera I can easily get the depth data. What I want to do is: once I detect that a person (a player) is in front of the camera, extract only that player's depth pixels and place them on a transparent background, so that the output is a static image of just the player's depth image on a transparent background.
Is it possible to do this? I did some research and found a few things that might help, such as SkeletonToDepthImage() or the depth pixel data (which includes both the distance and the player index).
I assume you mean that you want to render the player's silhouette from the depth data, showing only their outline. Is that right?
The Kinect for Windows Developer Toolkit provides several samples that do this. The "Green Screen" sample shows how to extract the depth data and map it onto the color stream so that only the player is shown against a background of your choosing. The "Basic Interactions" sample contains a silhouette example that does exactly what I understand you want.
Have a look at the samples Microsoft provides to get a better feel for many of the basic Kinect usage scenarios.
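For context, the two handlers further down assume a sensor with both the depth stream and the skeleton stream enabled; a minimal setup sketch (SDK 1.x, using Microsoft.Kinect plus System.Linq for FirstOrDefault; the handler names are the ones used below, the local variable is just illustrative) could look like this:

// hook up the first connected Kinect and enable the streams the
// silhouette control needs: depth for the player mask, skeleton
// tracking so the depth pixels carry a player index
KinectSensor sensor = KinectSensor.KinectSensors
    .FirstOrDefault(s => s.Status == KinectStatus.Connected);
if (sensor != null)
{
    sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
    sensor.SkeletonStream.Enable();
    sensor.SkeletonFrameReady += this.OnSkeletonFrameReady;
    sensor.DepthFrameReady += this.OnDepthFrameReady;
    sensor.Start();
}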
Based on the silhouette example in the Basic Interactions project, I wrote a silhouette control. The core of the control is the two event handlers shown after the field declarations below; they are the functions that actually produce the silhouette.
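The handlers reference a handful of fields and constants on the control whose declarations are not reproduced in the listing, so the following is an assumed reconstruction based on how they are used (the channel offsets follow the Bgra32 byte order of blue, green, red, alpha; SilhouetteImage is an Image element in the control's XAML):

// buffers and state shared by the two handlers below (reconstructed)
private Skeleton[] _skeletons;
private short[] pixelData;
private byte[] depthFrame32;
private byte[] convertedDepthBits;
private int playerIndex = -1;
private DepthImageFormat lastImageFormat = DepthImageFormat.Undefined;
private WriteableBitmap silhouette;

// exposed to consumers of the control (assumed to be a plain property)
public WriteableBitmap Silhouette { get; private set; }

// Bgra32 lays each pixel out as blue, green, red, alpha
private const int BlueIndex = 0;
private const int GreenIndex = 1;
private const int RedIndex = 2;
private const int Bgra32BytesPerPixel = 4;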
private void OnSkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
    {
        if (skeletonFrame != null && skeletonFrame.SkeletonArrayLength > 0)
        {
            if (_skeletons == null || _skeletons.Length != skeletonFrame.SkeletonArrayLength)
            {
                _skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
            }

            skeletonFrame.CopySkeletonDataTo(_skeletons);

            // grab the tracked skeleton and set the playerIndex for use pulling
            // the depth data out for the silhouette.
            // TODO: this assumes only a single tracked skeleton; we want to find the
            // closest person out of the tracked skeletons (see the sketch below).
            this.playerIndex = -1;
            for (int i = 0; i < _skeletons.Length; i++)
            {
                if (_skeletons[i].TrackingState != SkeletonTrackingState.NotTracked)
                {
                    this.playerIndex = i + 1;
                }
            }
        }
    }
}
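As the TODO notes, with more than one person in view you would usually want the skeleton closest to the sensor rather than whichever one happens to come last in the loop. A sketch of that selection, keeping the same index-to-player-index mapping as the loop above (the Position.Z comparison is the only new part):

// pick the skeleton nearest the sensor among those being tracked
this.playerIndex = -1;
float closestZ = float.MaxValue;
for (int i = 0; i < _skeletons.Length; i++)
{
    if (_skeletons[i].TrackingState != SkeletonTrackingState.NotTracked
        && _skeletons[i].Position.Z < closestZ)
    {
        closestZ = _skeletons[i].Position.Z;
        this.playerIndex = i + 1;
    }
}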
private void OnDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame != null)
        {
            // check if the format has changed and reallocate the buffers if so.
            bool haveNewFormat = this.lastImageFormat != depthFrame.Format;
            if (haveNewFormat)
            {
                this.pixelData = new short[depthFrame.PixelDataLength];
                this.depthFrame32 = new byte[depthFrame.Width * depthFrame.Height * Bgra32BytesPerPixel];
                this.convertedDepthBits = new byte[this.depthFrame32.Length];
            }

            depthFrame.CopyPixelDataTo(this.pixelData);

            for (int i16 = 0, i32 = 0; i16 < pixelData.Length && i32 < depthFrame32.Length; i16++, i32 += 4)
            {
                // the low bits of each depth pixel hold the player index (0 = no player)
                int player = pixelData[i16] & DepthImageFrame.PlayerIndexBitmask;

                if (player == this.playerIndex)
                {
                    // the tracked player: semi-transparent purple
                    convertedDepthBits[i32 + RedIndex] = 0x44;
                    convertedDepthBits[i32 + GreenIndex] = 0x23;
                    convertedDepthBits[i32 + BlueIndex] = 0x59;
                    convertedDepthBits[i32 + 3] = 0x66;
                }
                else if (player > 0)
                {
                    // any other detected player: semi-transparent gray
                    convertedDepthBits[i32 + RedIndex] = 0xBC;
                    convertedDepthBits[i32 + GreenIndex] = 0xBE;
                    convertedDepthBits[i32 + BlueIndex] = 0xC0;
                    convertedDepthBits[i32 + 3] = 0x66;
                }
                else
                {
                    // everything else: fully transparent
                    convertedDepthBits[i32 + RedIndex] = 0x0;
                    convertedDepthBits[i32 + GreenIndex] = 0x0;
                    convertedDepthBits[i32 + BlueIndex] = 0x0;
                    convertedDepthBits[i32 + 3] = 0x0;
                }
            }

            if (silhouette == null || haveNewFormat)
            {
                silhouette = new WriteableBitmap(
                    depthFrame.Width,
                    depthFrame.Height,
                    96,
                    96,
                    PixelFormats.Bgra32,
                    null);

                SilhouetteImage.Source = silhouette;
            }

            silhouette.WritePixels(
                new Int32Rect(0, 0, depthFrame.Width, depthFrame.Height),
                convertedDepthBits,
                depthFrame.Width * Bgra32BytesPerPixel,
                0);

            Silhouette = silhouette;
            this.lastImageFormat = depthFrame.Format;
        }
    }
}
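Because the bitmap is Bgra32 and the non-player pixels are written with zero alpha, you can get the static image on a transparent background that the question asks for by encoding the WriteableBitmap as a PNG, which preserves the alpha channel. A minimal sketch using the WPF PngBitmapEncoder from System.Windows.Media.Imaging (the method name and path argument are just for illustration):

// snapshot the current silhouette to a PNG file; the transparent
// pixels written in OnDepthFrameReady stay transparent in the file
private void SaveSilhouette(string path)
{
    var encoder = new PngBitmapEncoder();
    encoder.Frames.Add(BitmapFrame.Create(this.silhouette));

    using (var stream = System.IO.File.Create(path))
    {
        encoder.Save(stream);
    }
}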