6

I want to do something similar to what Johnny Lee did in his Wii head tracking http://www.youtube.com/watch?v=Jd3-eiid-Uw&feature=player_embedded

But I want to use the Kinect. Since Microsoft's SDK exposes the skeletal joints, I had hoped I might be able to just use that to get the head position. The problem is that I want to do this with my desktop computer and its monitor. If I put the Kinect sensor right next to my monitor and sit at the desk, pretty much just my head and neck are visible to the sensor, so the skeletal tracking doesn't pick up on my head position.

Is anyone familiar with a head tracking project using the Kinect? Preferably in C#

4

5 回答 5

2

我认为对于这个应用程序,您不能使用任何框架(如 Microsoft 的 SDK 或 OpenNI)提供的骨架跟踪。

我建议通过对原始深度数据应用深度阈值来分割用户的头部。这应该会导致背景减少。我认为已经有现有的方法可以做到这一点。

作为第二步,您希望在分段用户内部有一个轴之类的东西。最简单的方法是使用 opencv fitEllipse。返回的椭圆的长轴与深度信息相结合,为您提供了这个轴。

这种方法仅在大多数分割点属于用户头部时才有效。如果您距离较远,则必须考虑一种仅分割头部的方法。椭圆拟合应该始终有效。

于 2011-07-31T18:49:53.503 回答
1

您不需要 kinect 来跟踪您的头部位置。您可以通过使用面部跟踪来使用普通相机和 openCV 做同样的事情。

此处显示的简单示例:http://vimeo.com/19464641

在视频中,我使用 openCV 来跟踪我的脸(你在角落里几乎看不到,但红点表示我的脸位置)。

于 2011-09-18T13:09:24.330 回答
1

可以查看 Channel 9 上关于此类主题的教程,直接看“骨骼跟踪基础”(Skeletal Tracking Fundamentals)那期视频即可。不过如果你想节省时间,这里有一些现成的代码。
XAML

<Window x:Class="SkeletalTracking.MainWindow"
    xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
    Title="MainWindow" Height="600" Width="800" Loaded="Window_Loaded" 
    xmlns:my="clr-namespace:Microsoft.Samples.Kinect.WpfViewers;assembly=Microsoft.Samples.Kinect.WpfViewers" 
    Closing="Window_Closing" WindowState="Maximized">       
<Canvas Name="MainCanvas">
    <my:KinectColorViewer Canvas.Left="0" Canvas.Top="0" Width="640" Height="480" Name="kinectColorViewer1" 
                          Kinect="{Binding ElementName=kinectSensorChooser1, Path=Kinect}" />
    <my:KinectSensorChooser Canvas.Left="250" Canvas.Top="380" Name="kinectSensorChooser1" Width="328" />
    <!-- Image that will be repositioned over the tracked head joint. -->
    <Image Canvas.Left="66" Canvas.Top="90" Height="87" Name="headImage" Stretch="Fill" Width="84" Source="/SkeletalTracking;component/c4f-color.png" />
</Canvas>
</Window>

内部代码

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using Microsoft.Kinect;
using Coding4Fun.Kinect.Wpf; 

namespace SkeletalTracking
{
/// <summary>
/// Interaction logic for MainWindow.xaml.
/// Tracks the first Kinect skeleton in view and positions <c>headImage</c>
/// over the user's head in the 640x480 color image.
/// </summary>
public partial class MainWindow : Window
{
    public MainWindow()
    {
        InitializeComponent();
    }

    // Set when the window is closing so AllFramesReady stops processing frames.
    bool closing = false;
    // The Kinect SDK always reports data slots for up to 6 skeletons per frame.
    const int skeletonCount = 6; 
    Skeleton[] allSkeletons = new Skeleton[skeletonCount];

    private void Window_Loaded(object sender, RoutedEventArgs e)
    {
        // React whenever the sensor chooser selects (or loses) a Kinect sensor.
        kinectSensorChooser1.KinectSensorChanged += new DependencyPropertyChangedEventHandler(kinectSensorChooser1_KinectSensorChanged);
    }

    /// <summary>
    /// Stops the previously selected sensor (if any), then enables the
    /// skeleton, depth and color streams on the new sensor and starts it.
    /// </summary>
    void kinectSensorChooser1_KinectSensorChanged(object sender, DependencyPropertyChangedEventArgs e)
    {
        KinectSensor old = (KinectSensor)e.OldValue;

        StopKinect(old);

        KinectSensor sensor = (KinectSensor)e.NewValue;

        if (sensor == null)
        {
            return;
        }

        // Optional smoothing parameters; pass them to SkeletonStream.Enable
        // (commented call below) to reduce joint jitter at the cost of latency.
        var parameters = new TransformSmoothParameters
        {
            Smoothing = 0.3f,
            Correction = 0.0f,
            Prediction = 0.0f,
            JitterRadius = 1.0f,
            MaxDeviationRadius = 0.5f
        };
        //sensor.SkeletonStream.Enable(parameters);

        sensor.SkeletonStream.Enable();

        sensor.AllFramesReady += new EventHandler<AllFramesReadyEventArgs>(sensor_AllFramesReady);
        sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30); 
        sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

        try
        {
            sensor.Start();
        }
        catch (System.IO.IOException)
        {
            // Another application already owns this sensor.
            kinectSensorChooser1.AppConflictOccurred();
        }
    }

    /// <summary>
    /// Per-frame handler: finds the first tracked skeleton and moves
    /// <c>headImage</c> to follow its head joint.
    /// </summary>
    void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
    {
        if (closing)
        {
            return;
        }

        // Get the first tracked skeleton in this frame, if any.
        Skeleton first = GetFirstSkeleton(e);

        if (first == null)
        {
            return; 
        }

        // Position the image using skeleton-space coordinates scaled to the screen.
        ScalePosition(headImage, first.Joints[JointType.Head]);
        //ScalePosition(leftEllipse, first.Joints[JointType.HandLeft]);
        //ScalePosition(rightEllipse, first.Joints[JointType.HandRight]);

        // Alternatively, position it via depth->color mapping (overwrites the above).
        GetCameraPoint(first, e); 
    }

    /// <summary>
    /// Maps the head and hand joints from skeleton space to depth space and
    /// then to color-image space, and positions <c>headImage</c> at the
    /// head's color-image coordinates.
    /// </summary>
    void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
    {
        using (DepthImageFrame depth = e.OpenDepthImageFrame())
        {
            // The depth frame can be null if it arrived late or the sensor vanished.
            if (depth == null ||
                kinectSensorChooser1.Kinect == null)
            {
                return;
            }

            // Map each joint's skeleton-space position to a point on the depth map.
            //head
            DepthImagePoint headDepthPoint =
                depth.MapFromSkeletonPoint(first.Joints[JointType.Head].Position);
            //left hand
            DepthImagePoint leftDepthPoint =
                depth.MapFromSkeletonPoint(first.Joints[JointType.HandLeft].Position);
            //right hand
            DepthImagePoint rightDepthPoint =
                depth.MapFromSkeletonPoint(first.Joints[JointType.HandRight].Position);

            // Map each depth point to the corresponding pixel on the color image.
            //head
            ColorImagePoint headColorPoint =
                depth.MapToColorImagePoint(headDepthPoint.X, headDepthPoint.Y,
                ColorImageFormat.RgbResolution640x480Fps30);
            //left hand
            ColorImagePoint leftColorPoint =
                depth.MapToColorImagePoint(leftDepthPoint.X, leftDepthPoint.Y,
                ColorImageFormat.RgbResolution640x480Fps30);
            //right hand
            ColorImagePoint rightColorPoint =
                depth.MapToColorImagePoint(rightDepthPoint.X, rightDepthPoint.Y,
                ColorImageFormat.RgbResolution640x480Fps30);

            // Center the UI element on the mapped color-image location.
            CameraPosition(headImage, headColorPoint);
            //CameraPosition(leftEllipse, leftColorPoint);
            //CameraPosition(rightEllipse, rightColorPoint);
        }        
    }

    /// <summary>
    /// Returns the first skeleton with TrackingState == Tracked in the current
    /// frame, or null when no skeleton frame / no tracked skeleton is available.
    /// </summary>
    Skeleton GetFirstSkeleton(AllFramesReadyEventArgs e)
    {
        using (SkeletonFrame skeletonFrameData = e.OpenSkeletonFrame())
        {
            if (skeletonFrameData == null)
            {
                return null; 
            }

            skeletonFrameData.CopySkeletonDataTo(allSkeletons);

            // Get the first tracked skeleton (FirstOrDefault yields null if none).
            Skeleton first = (from s in allSkeletons
                                     where s.TrackingState == SkeletonTrackingState.Tracked
                                     select s).FirstOrDefault();

            return first;
        }
    }

    /// <summary>
    /// Safely stops a sensor and its audio source; tolerates null and
    /// already-stopped sensors.
    /// </summary>
    private void StopKinect(KinectSensor sensor)
    {
        if (sensor != null)
        {
            if (sensor.IsRunning)
            {
                //stop sensor 
                sensor.Stop();

                //stop audio if not null
                if (sensor.AudioSource != null)
                {
                    sensor.AudioSource.Stop();
                }
            }
        }
    }

    /// <summary>
    /// Centers <paramref name="element"/> on a color-image point. The element's
    /// half width/height is subtracted so the point lands in its middle rather
    /// than its top-left corner.
    /// </summary>
    private void CameraPosition(FrameworkElement element, ColorImagePoint point)
    {
        Canvas.SetLeft(element, point.X - element.Width / 2);
        Canvas.SetTop(element, point.Y - element.Height / 2);
    }

    /// <summary>
    /// Scales a skeleton-space joint to 1280x720 screen coordinates (via the
    /// Coding4Fun ScaleTo extension) and moves <paramref name="element"/> there.
    /// </summary>
    private void ScalePosition(FrameworkElement element, Joint joint)
    {
        // Convert the joint position to screen X/Y. (The original listing left
        // both declarations commented out, which did not compile.)
        Joint scaledJoint = joint.ScaleTo(1280, 720); 

        // Alternative: scale with a sensitivity factor (.3f means the joint only
        // needs to travel 1/3 of the physical distance to cross the screen).
        //Joint scaledJoint = joint.ScaleTo(1280, 720, .3f, .3f);

        Canvas.SetLeft(element, scaledJoint.Position.X);
        Canvas.SetTop(element, scaledJoint.Position.Y); 
    }

    private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
    {
        // Stop frame processing first, then shut the sensor down.
        closing = true; 
        StopKinect(kinectSensorChooser1.Kinect); 
    }
   }
}

我个人建议观看视频,因为它们解释了一切。祝你的项目好运!

于 2012-06-02T20:34:05.220 回答
1

官方的 Kinect for Windows SDK 存在一些限制,这与为 XBox 和 XDK 提供的指南一致,因为您需要距离传感器 1.2m 到 3.5m 才能使用 Kinect 传感器。这种限制实际上在其他 SDK 中得到了减少,例如 OpenNI/NITE 库,它允许您检测更靠近传感器的骨架/对象。

骨骼输入也会遇到的问题是它只会检测与骨骼成比例的头部位置,但如果您左右旋转头部则不会。为了实现这一点,您不会使用原始深度流和一些围绕对象识别的智能,这有点复杂。

过去,我使用过这个商业 .NET API,它使用网络摄像头来跟踪头部运动,并实现你所追求的:http://luxand.com/facesdk/index2.php

于 2011-07-13T04:04:21.867 回答
-1

我建议使用:Aforge.net,可以与 Microsoft XNA Framework 一起使用,也可以单独使用。不过,您需要自己进行一些开发。我也在使用 C# 做类似的事情。我认为您将无法找到完整的开箱即用示例,还没有人做过这样的东西。(如果我错了,请纠正我。)

于 2011-09-19T09:27:31.873 回答