
I can get faces from a live webcam as a list of Windows.Media.FaceAnalysis DetectedFace objects. Now I want to pass these faces to the Microsoft Cognitive Services Face API to detect the faces and get their face attributes. How can I do that?

IList<DetectedFace> faces = null;

// Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
// GetPreviewFrame will convert the native webcam frame into this format.
const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
{
    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
    {
        faces = await this.faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap);

        // Now pass these faces to the Cognitive Services API
        // faceClient.DetectAsync
    }
}
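
For context, the fields referenced above (this.mediaCapture, this.videoProperties, this.faceDetector) are not shown in the snippet; a minimal setup sketch, assuming the default camera and preview stream (names and settings are assumptions, not from the original code):

// Assumed setup (not part of the original snippet): initialize the webcam,
// read the preview stream's dimensions, and create the on-device FaceDetector.
this.mediaCapture = new MediaCapture();
await this.mediaCapture.InitializeAsync();

// The preview must be running (e.g. CaptureElement.Source = this.mediaCapture;
// await this.mediaCapture.StartPreviewAsync();) before GetPreviewFrameAsync succeeds.
this.videoProperties = this.mediaCapture.VideoDeviceController
    .GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

this.faceDetector = await FaceDetector.CreateAsync();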

1 Answer


The DetectedFace object contains the bounding box of the actual face, so you can use it to crop each face into an in-memory stream and send that to the face client:

// Namespaces used below; Face, FaceServiceClient and FaceAttributeType come from
// the Microsoft.ProjectOxford.Face NuGet package (the Cognitive Services Face
// client this answer appears to use).
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Runtime.InteropServices.WindowsRuntime; // AsBuffer() / AsStream()
using System.Threading.Tasks;
using Microsoft.ProjectOxford.Face;
using Microsoft.ProjectOxford.Face.Contract;
using Windows.Graphics.Imaging;
using Windows.Media;
using Windows.Media.FaceAnalysis;
using Windows.Storage.Streams;

private async Task DetectAsync()
{
    IList<DetectedFace> faces = null;
    const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
    using (VideoFrame destinationPreviewFrame = new VideoFrame(InputPixelFormat, 640, 480))
    {
        await this._mediaCapture.GetPreviewFrameAsync(destinationPreviewFrame);

        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
        {
            faces = await this.faceDetector.DetectFacesAsync(destinationPreviewFrame.SoftwareBitmap);

            foreach (var face in faces)
            {
                // convert the NV12 frame to RGBA16, a format the bitmap encoder can work with,
                // and dispose the converted copy once this face has been processed
                using (SoftwareBitmap convertedBitmap = SoftwareBitmap.Convert(destinationPreviewFrame.SoftwareBitmap, BitmapPixelFormat.Rgba16))
                {
                    // get the raw bytes of the detected face, cropped to its bounding box
                    byte[] rawBytes = await GetBytesFromBitmap(convertedBitmap, BitmapEncoder.BmpEncoderId, face.FaceBox);

                    // wrap the bytes in a stream and send it to the face client
                    using (Stream stream = rawBytes.AsBuffer().AsStream())
                    {
                        var faceAttributesToReturn = new List<FaceAttributeType>()
                        {
                            FaceAttributeType.Age,
                            FaceAttributeType.Emotion,
                            FaceAttributeType.Hair
                        };

                        Face[] detectedFaces = await this.faceClient.DetectAsync(stream, true, true, faceAttributesToReturn);

                        Debug.Assert(detectedFaces.Length > 0);
                    }
                }
            }
        }
    }
}

private async Task<byte[]> GetBytesFromBitmap(SoftwareBitmap soft, Guid encoderId, BitmapBounds bounds)
{
    byte[] array = null;

    using (var ms = new InMemoryRandomAccessStream())
    {
        BitmapEncoder encoder = await BitmapEncoder.CreateAsync(encoderId, ms);
        encoder.SetSoftwareBitmap(soft);

        // apply the bounds of the face
        encoder.BitmapTransform.Bounds = bounds;

        await encoder.FlushAsync();

        array = new byte[ms.Size];

        // rewind to the start of the stream before reading the encoded bytes back out
        ms.Seek(0);
        await ms.ReadAsync(array.AsBuffer(), (uint)ms.Size, InputStreamOptions.None);
    }

    return array;
}
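
For completeness, faceClient above is the Cognitive Services face client; a minimal wiring sketch, assuming the Microsoft.ProjectOxford.Face package (the subscription key is a placeholder, and depending on the package version a second constructor argument for a regional endpoint may also be available):

// Hypothetical setup, not part of the original answer: create the client
// with your own subscription key, then run the detection loop above.
this.faceClient = new FaceServiceClient("your-subscription-key");
await DetectAsync();

Cropping each face to its FaceBox before uploading also keeps the requests small: only the face regions are sent to the service rather than the whole preview frame.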
answered 2017-09-24T04:54:51.707