
I am exploring the Microsoft Cognitive Face API and I am new to it. I can get face attributes from a simple image, but my question is: how do I get a person's face attributes from the live video feed of a Kinect in WPF C#? It would be great if someone could help me. Thanks in advance!

I tried capturing a frame from the Kinect color feed to a file location every 2 seconds, then opening that file path as a stream and passing it to the Face API functions, and that works. Below is the code I tried.

using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Threading;
using Microsoft.Kinect;
using Microsoft.ProjectOxford.Face;
using Microsoft.ProjectOxford.Face.Contract;

namespace CognitiveFaceAPISample
{

    public partial class MainWindow : Window
    {
        private readonly IFaceServiceClient faceServiceClient = new FaceServiceClient("c2446f84b1eb486ca11e2f5d6e670878");
        KinectSensor ks;
        ColorFrameReader cfr;
        byte[] colorData;
        ColorImageFormat format;
        WriteableBitmap wbmp;
        BitmapSource bmpSource;
        int imageSerial;
        DispatcherTimer timer, timer2;
        string streamF = "Frames//frame.jpg";

        public MainWindow()
        {
            InitializeComponent();
            ks = KinectSensor.GetDefault();
            ks.Open();
            var fd = ks.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Bgra);
            uint frameSize = fd.BytesPerPixel * fd.LengthInPixels;
            colorData = new byte[frameSize];
            format = ColorImageFormat.Bgra;
            imageSerial = 0;

            cfr = ks.ColorFrameSource.OpenReader();
            cfr.FrameArrived += cfr_FrameArrived;
        }

        void cfr_FrameArrived(object sender, ColorFrameArrivedEventArgs e)
        {
            if (e.FrameReference == null) return;

            using (ColorFrame cf = e.FrameReference.AcquireFrame())
            {
                if (cf == null) return;
                cf.CopyConvertedFrameDataToArray(colorData, format);
                var fd = cf.FrameDescription;

                // Creating BitmapSource
                var bytesPerPixel = (PixelFormats.Bgr32.BitsPerPixel) / 8;
                var stride = bytesPerPixel * cf.FrameDescription.Width;

                bmpSource = BitmapSource.Create(fd.Width, fd.Height, 96.0, 96.0, PixelFormats.Bgr32, null, colorData, stride);

                // WritableBitmap to show on UI
                wbmp = new WriteableBitmap(bmpSource);
                FacePhoto.Source = wbmp;          

            }
        }

        private void SaveImage(BitmapSource image)
        {
            try
            {
                // Dispose the stream even if encoding fails, so the file is not left locked.
                using (FileStream stream = new FileStream(@"Frames\frame.jpg", FileMode.OpenOrCreate))
                {
                    JpegBitmapEncoder encoder = new JpegBitmapEncoder();
                    encoder.FlipHorizontal = true;
                    encoder.FlipVertical = false;
                    encoder.QualityLevel = 30;
                    encoder.Frames.Add(BitmapFrame.Create(image));
                    encoder.Save(stream);
                }
            }
            catch (Exception)
            {
                // Swallow write failures (e.g. the file is momentarily locked by a reader).
            }
        }       


        private void Window_Loaded(object sender, RoutedEventArgs e)
        {
            timer = new DispatcherTimer { Interval = TimeSpan.FromSeconds(2) };
            timer.Tick += Timer_Tick;
            timer.Start();
            timer2 = new DispatcherTimer { Interval = TimeSpan.FromSeconds(5) };
            timer2.Tick += Timer2_Tick;
            timer2.Start();
        }
        private void Timer_Tick(object sender, EventArgs e)
        {
            SaveImage(bmpSource);
        }
        private async void Timer2_Tick(object sender, EventArgs e)
        {
            Title = "Detecting...";
            FaceRectangle[] faceRects = await UploadAndDetectFaces(streamF);
            Face[] faceAttributes = await UploadAndDetectFaceAttributes(streamF);
            Title = String.Format("Detection Finished. {0} face(s) detected", faceRects.Length);

            if (faceRects.Length > 0)
            {
                DrawingVisual visual = new DrawingVisual();
                DrawingContext drawingContext = visual.RenderOpen();
                drawingContext.DrawImage(bmpSource,
                    new Rect(0, 0, bmpSource.Width, bmpSource.Height));
                double dpi = bmpSource.DpiX;
                double resizeFactor = 96 / dpi;

                foreach (var faceRect in faceRects)
                {
                    drawingContext.DrawRectangle(
                        Brushes.Transparent,
                        new Pen(Brushes.Red, 2),
                        new Rect(
                            faceRect.Left * resizeFactor,
                            faceRect.Top * resizeFactor,
                            faceRect.Width * resizeFactor,
                            faceRect.Height * resizeFactor
                            )
                    );
                }

                drawingContext.Close();
                RenderTargetBitmap faceWithRectBitmap = new RenderTargetBitmap(
                    (int)(bmpSource.PixelWidth * resizeFactor),
                    (int)(bmpSource.PixelHeight * resizeFactor),
                    96,
                    96,
                    PixelFormats.Pbgra32);
                faceWithRectBitmap.Render(visual);
                FacePhoto.Source = faceWithRectBitmap;
            }

            if (faceAttributes.Length > 0)
            {
                foreach (var faceAttr in faceAttributes)
                {
                    Label lb = new Label();
                    //Canvas.SetLeft(lb, lb.Width);
                    lb.Content = faceAttr.FaceAttributes.Gender;// + " " + faceAttr.Gender + " " + faceAttr.FacialHair + " " + faceAttr.Glasses + " " + faceAttr.HeadPose + " " + faceAttr.Smile;
                    lb.FontSize = 50;
                    lb.Width = 200;
                    lb.Height = 100;
                    stack.Children.Add(lb);
                }
            }
        }

        private async Task<FaceRectangle[]> UploadAndDetectFaces(string imageFilePath)
        {
            try
            {
                using (Stream imageFileStream = File.OpenRead(imageFilePath))
                {
                    var faces = await faceServiceClient.DetectAsync(imageFilePath);
                    var faceRects = faces.Select(face => face.FaceRectangle);
                    var faceAttrib = faces.Select(face => face.FaceAttributes);
                    return faceRects.ToArray();

                }
            }
            catch (Exception)
            {
                return new FaceRectangle[0];
            }
        }

        private async Task<Face[]> UploadAndDetectFaceAttributes(string imageFilePath)
        {
            try
            {
                using (Stream imageFileStream = File.Open(imageFilePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
                {
                    var faces = await faceServiceClient.DetectAsync(imageFileStream, true, true, new FaceAttributeType[] { FaceAttributeType.Gender, FaceAttributeType.Age, FaceAttributeType.Smile, FaceAttributeType.Glasses, FaceAttributeType.HeadPose, FaceAttributeType.FacialHair });

                    return faces.ToArray();

                }
            }
            catch (Exception)
            {
                return new Face[0];
            }
        }
    }
}

The code above works fine. However, I want to convert each frame of the Kinect color feed directly into a Stream. I have searched for how to do this, but nothing I found helped. It would be great if someone could help me. Thanks!


1 Answer


In SaveImage, you can persist the frame to a MemoryStream instead, rewind the stream (by setting Position = 0), and then send that stream to DetectAsync().
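
A minimal sketch of that approach (the method name is my own; it reuses the faceServiceClient and bmpSource fields from the question's code):

    // Encode the current frame into a MemoryStream and send it straight to the
    // Face API, with no file on disk involved.
    private async Task<Face[]> DetectFromCurrentFrameAsync()
    {
        using (var stream = new MemoryStream())
        {
            JpegBitmapEncoder encoder = new JpegBitmapEncoder();
            encoder.QualityLevel = 30;
            encoder.Frames.Add(BitmapFrame.Create(bmpSource));
            encoder.Save(stream);

            // Rewind so DetectAsync reads the image from the beginning.
            stream.Position = 0;

            var faces = await faceServiceClient.DetectAsync(stream, true, true,
                new FaceAttributeType[] { FaceAttributeType.Gender, FaceAttributeType.Age });
            return faces.ToArray();
        }
    }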

Also note that in UploadAndDetectFaces, you should send imageFileStream to DetectAsync(), not imageFilePath. In any case, you probably don't want to call both UploadAndDetectFaces and UploadAndDetectFaceAttributes, since you're just doubling the work (and burning through your quota/rate limit).
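
For example, Timer2_Tick could make a single round trip and derive the rectangles from the returned faces (a sketch built on the question's existing UploadAndDetectFaceAttributes):

    private async void Timer2_Tick(object sender, EventArgs e)
    {
        Title = "Detecting...";
        // One detection call: each returned Face already carries both its
        // FaceRectangle and the requested FaceAttributes.
        Face[] faces = await UploadAndDetectFaceAttributes(streamF);
        FaceRectangle[] faceRects = faces.Select(f => f.FaceRectangle).ToArray();
        Title = String.Format("Detection Finished. {0} face(s) detected", faceRects.Length);
        // ...draw faceRects and display faces[i].FaceAttributes as before...
    }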

Answered 2017-02-11T01:37:06.947