我正在尝试在用户的脸上实时放置一个方形矩形:我在全屏(self.view.frame)视频流上使用 CIFaceFeature 进行人脸识别。但是,我从 CIFaceFeature.bounds 获取的坐标所在的坐标系与视图使用的坐标系不同。我已经参照这个示例和其他示例尝试转换这些坐标,但因为我是在视频流而不是单张图片上运行,所以没有可以传入以辅助坐标转换的 CIImage 图像。下面是我的配置示例,有人知道如何将其转换为可用的 CGRect 吗?
// Delegate callback invoked for every captured video frame: runs Core Image
// face detection on the frame and attempts to map each face's bounds from
// Core Image coordinates (bottom-left origin) into the view's UIKit
// coordinates (top-left origin).
// NOTE(review): this snippet is truncated — the function's closing brace is
// not visible here.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Extract the pixel buffer backing this sample buffer.
// NOTE(review): the force-unwrap crashes if the sample buffer carries no
// image buffer; prefer a guard-let here.
let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
// NOTE(review): CVImageBuffer and CVPixelBuffer are the same CF type in
// Swift, so this Unmanaged round-trip appears to be a no-op cast — confirm,
// and if so replace with a plain assignment.
let opaqueBuffer = Unmanaged<CVImageBuffer>.passUnretained(imageBuffer!).toOpaque()
let pixelBuffer = Unmanaged<CVPixelBuffer>.fromOpaque(opaqueBuffer).takeUnretainedValue()
// Wrap the raw pixels so Core Image can analyze them.
let sourceImage = CIImage(cvPixelBuffer: pixelBuffer)
// Run the instance's pre-built detector (force-unwrapped: crashes if nil).
let features = self.faceDetector!.features(in: sourceImage, options: options)
if (features.count != 0) {
let faceImage = sourceImage
// NOTE(review): this second detector and the `faces` array are created on
// every frame but never used below (the loop iterates `features`);
// constructing a CIDetector per frame is expensive dead work — remove.
let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
let faces = faceDetector?.features(in: faceImage) as! [CIFaceFeature]
// Flip the Y axis (scale y by -1, then shift down by the image height):
// converts a bottom-left-origin rect to a top-left-origin rect.
let transformScale = CGAffineTransform(scaleX: 1, y: -1)
let transform = transformScale.translatedBy(x: 0, y: -faceImage.extent.height)
for feature in features as! [CIFaceFeature] {
faceBounds = feature.bounds
var fb = faceBounds?.applying(transform)
// imageViewSize is the screen frame
// NOTE(review): the aspect-fit scale should come from the IMAGE extent vs.
// the view size (sourceImage.extent.width / imageViewSize.width, etc.),
// not from the face rect's own size — as written every face is scaled to
// fill the whole view, which is why the result looks wrong.
let scale = min(imageViewSize.width / fb!.width,
imageViewSize.height / fb!.height)
let dx = (imageViewSize.width - fb!.width * scale) / 2
let dy = (imageViewSize.height - fb!.height * scale) / 2
// BUG: CGRect.applying(_:) RETURNS a new rect; the result is discarded
// here, so the scale transform is never applied to `fb`. Should be:
// fb = fb?.applying(CGAffineTransform(scaleX: scale, y: scale))
fb?.applying(CGAffineTransform(scaleX: scale, y: scale))
fb?.origin.x += dx
fb?.origin.y += dy
realFaceRect = fb // COMPLETELY WRONG :'(
}
}