
I am trying to control the mouse using head tracking with a Kinect. I think it should be possible, but I haven't managed to get it working yet.

Here is the code:

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using Microsoft.Kinect;
using Microsoft.Kinect.Toolkit.FaceTracking;
using System.IO;
using System.Threading;
using System.Runtime.InteropServices;

namespace AwesomeFaceTracking
{
public partial class MainWindow : Window
{
    KinectSensor kinectSensor;
    FaceTracker faceTracker;
    private byte[] colorPixelData;
    private short[] depthPixelData;
    private Skeleton[] skeletonData;

    [DllImport("user32")]

    public static extern int SetCursorPos(int x, int y);

    private const int MOUSEEVENTF_MOVE = 0x0001;
    private const int MOUSEEVENTF_LEFTDOWN = 0x0002;
    private const int MOUSEEVENTF_LEFTUP = 0x0004;
    private const int MOUSEEVENTF_RIGHTDOWN = 0x0008;

    [DllImport("user32.dll",
        CharSet = CharSet.Auto, CallingConvention = CallingConvention.StdCall)]

    public static extern void mouse_event(int dwflags, int dx, int dy, int cButtons, int dwExtraInfo);

    public MainWindow()
    {
        InitializeComponent();

        // A KinectSensor may be plugged in after the application has started,
        // so listen for status changes.
        KinectSensor.KinectSensors.StatusChanged += KinectSensors_StatusChanged;
        // Or it is already plugged in, so look for it now.
        var kinect = KinectSensor.KinectSensors.FirstOrDefault(k => k.Status == KinectStatus.Connected);
        if (kinect != null)
        {
            OpenKinect(kinect);
        }
    }

    /// <summary>
    /// Handles the StatusChanged event of the KinectSensors control.
    /// </summary>
    /// <param name="sender">The source of the event.</param>
    /// <param name="e">The <see cref="Microsoft.Kinect.StatusChangedEventArgs"/> instance containing the event data.</param>
    void KinectSensors_StatusChanged(object sender, StatusChangedEventArgs e)
    {
        if (e.Status == KinectStatus.Connected)
        {
            OpenKinect(e.Sensor);
        }
    }

    /// <summary>
    /// Opens the kinect.
    /// </summary>
    /// <param name="newSensor">The new sensor.</param>
    private void OpenKinect(KinectSensor newSensor)
    {
        kinectSensor = newSensor;

        // Initialize all the necessary streams:
        // - ColorStream with default format
        // - DepthStream with Near mode
        // - SkeletonStream with tracking in NearRange and Seated mode.

        kinectSensor.ColorStream.Enable();

        //kinectSensor.DepthStream.Range = DepthRange.Near;
        kinectSensor.DepthStream.Enable(DepthImageFormat.Resolution80x60Fps30);

        //kinectSensor.SkeletonStream.EnableTrackingInNearRange = true;
        //kinectSensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
        kinectSensor.SkeletonStream.Enable(new TransformSmoothParameters()
        {
            Correction = 0.5f,
            JitterRadius = 0.05f,
            MaxDeviationRadius = 0.05f,
            Prediction = 0.5f,
            Smoothing = 0.5f
        });

        // Listen to the AllFramesReady event to receive KinectSensor's data.
        kinectSensor.AllFramesReady += new EventHandler<AllFramesReadyEventArgs>(kinectSensor_AllFramesReady);

        // Initialize data arrays
        colorPixelData = new byte[kinectSensor.ColorStream.FramePixelDataLength];
        depthPixelData = new short[kinectSensor.DepthStream.FramePixelDataLength];
        // Use the SDK-reported skeleton array length instead of hard-coding 6.
        skeletonData = new Skeleton[kinectSensor.SkeletonStream.FrameSkeletonArrayLength];

        // Starts the Sensor
        kinectSensor.Start();

        // Initialize a new FaceTracker with the KinectSensor
        faceTracker = new FaceTracker(kinectSensor);
    }

    /// <summary>
    /// Handles the AllFramesReady event of the kinectSensor control.
    /// </summary>
    /// <param name="sender">The source of the event.</param>
    /// <param name="e">The <see cref="Microsoft.Kinect.AllFramesReadyEventArgs"/> instance containing the event data.</param>
    void kinectSensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
    {
        // Retrieve each single frame and copy the data
        using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
        {
            if (colorImageFrame == null)
                return;
            colorImageFrame.CopyPixelDataTo(colorPixelData);
        }

        using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
        {
            if (depthImageFrame == null)
                return;
            depthImageFrame.CopyPixelDataTo(depthPixelData);
        }

        using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
        {
            if (skeletonFrame == null)
                return;
            skeletonFrame.CopySkeletonDataTo(skeletonData);
        }

        // Retrieve the first tracked skeleton if any. Otherwise, do nothing.
        var skeleton = skeletonData.FirstOrDefault(s => s.TrackingState == SkeletonTrackingState.Tracked);
        if (skeleton == null)
            return;

        // Let the faceTracker process the data.
        FaceTrackFrame faceFrame = faceTracker.Track(kinectSensor.ColorStream.Format, colorPixelData,
                                          kinectSensor.DepthStream.Format, depthPixelData,
                                          skeleton);

        // If a face is tracked, then we can use it.
        if (faceFrame.TrackSuccessful)
        {
            // Retrieve only the Animation Units coeffs.
            var AUCoeff = faceFrame.GetAnimationUnitCoefficients();

            var jawLowerer = AUCoeff[AnimationUnit.JawLower];
            jawLowerer = jawLowerer < 0 ? 0 : jawLowerer;
            MouthScaleTransform.ScaleY = jawLowerer * 5 + 0.1;
            MouthScaleTransform.ScaleX = (AUCoeff[AnimationUnit.LipStretcher] + 1);

            LeftBrow.Y = RightBrow.Y = (AUCoeff[AnimationUnit.BrowLower]) * 40;

            RightBrowRotate.Angle = (AUCoeff[AnimationUnit.BrowRaiser] * 20);
            LeftBrowRotate.Angle = -RightBrowRotate.Angle;

            CanvasRotate.Angle = faceFrame.Rotation.Z;
            CanvasTranslate.X = faceFrame.Translation.X;
            CanvasTranslate.Y = faceFrame.Translation.Y;

            Joint ScaledJoint = skeleton.Joints[JointType.Head];

            // Working!!!
            int topofscreen = Convert.ToInt32(faceFrame.Translation.Y);
            int leftofscreen = Convert.ToInt32(faceFrame.Translation.X);

            SetCursorPos(leftofscreen, topofscreen);
            Thread.Sleep(1);
        }
    }
}
}

If I set topofscreen and leftofscreen to faceFrame.Rotation.Y and faceFrame.Rotation.X instead, it works when I tilt my head to the right or left. But what I want is to control the mouse when I move my head at any possible angle, for example looking up, and...
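
To make it clearer what I am after, this is roughly the mapping I have in mind, extending the Rotation.X/Y idea to both axes. It is only a sketch: the ±20° usable range is a guess on my part, the axis signs may need flipping, and I am scaling to the primary screen with WPF's SystemParameters:

// Sketch only: map head yaw/pitch to a cursor position.
// Assumptions: faceFrame.Rotation.Y (yaw) and Rotation.X (pitch) are in degrees,
// and a comfortable head-turn range is roughly -20..+20 degrees.
// Uses the SetCursorPos P/Invoke already declared in the class above.
private void MoveCursorFromRotation(FaceTrackFrame faceFrame)
{
    const double maxAngle = 20.0; // assumed usable range in degrees

    // Clamp each angle to the assumed range.
    double yaw = Math.Max(-maxAngle, Math.Min(maxAngle, faceFrame.Rotation.Y));
    double pitch = Math.Max(-maxAngle, Math.Min(maxAngle, faceFrame.Rotation.X));

    // Normalize into 0..1 (flip the signs here if the cursor moves the wrong way).
    double nx = (yaw + maxAngle) / (2 * maxAngle);          // 0 = far left, 1 = far right
    double ny = 1.0 - (pitch + maxAngle) / (2 * maxAngle);  // 0 = top, 1 = bottom

    // Scale to the primary screen size reported by WPF.
    int x = (int)(nx * SystemParameters.PrimaryScreenWidth);
    int y = (int)(ny * SystemParameters.PrimaryScreenHeight);

    SetCursorPos(x, y);
}

The idea is that turning the head from one extreme to the other sweeps the cursor across the whole screen.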

Any tips on how to do this?

Update

Am I going in the wrong direction here? I mean, should I instead track a specific point on the face (like an eye or the nose) and control the mouse with that? If so, is there an easy way to do it?
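
If tracking a point is the better approach, this is the kind of thing I imagine. GetProjected3DShape() does return the tracked face points in color-image coordinates, but the exact FeaturePoint member for the nose tip is my assumption (I have not checked the enum), and the 640x480 size only applies to the default color format:

// Sketch only: drive the cursor from one projected face point.
// FeaturePoint.NoseTip is a hypothetical member name; check the FeaturePoint
// enum in Microsoft.Kinect.Toolkit.FaceTracking for the real one.
private void MoveCursorFromFacePoint(FaceTrackFrame faceFrame)
{
    var projectedPoints = faceFrame.GetProjected3DShape();
    var nose = projectedPoints[FeaturePoint.NoseTip]; // assumed member name

    // Assuming the default 640x480 color format.
    double nx = nose.X / 640.0;
    double ny = nose.Y / 480.0;

    // If the image turns out to be mirrored, nx probably needs to be (1 - nx).
    int x = (int)(nx * SystemParameters.PrimaryScreenWidth);
    int y = (int)(ny * SystemParameters.PrimaryScreenHeight);

    SetCursorPos(x, y);
}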

