
I am trying to detect faces in a locally recorded video using the Vision framework. Most of the available examples detect faces in a live camera feed.

  • How can I detect faces in a local video and, at runtime, place a rectangle over each detected face using the Vision/CoreML framework?

1 Answer

  • Wait for your videoItem to be ready to play
  • Add a video output to it
  • Add a periodic time observer that should be pinged on every frame
  • Extract the new pixel buffer and process it with Vision / CoreML as needed
  • If you use the Vision framework, you want a VNSequenceRequestHandler rather than a VNImageRequestHandler (see the sketch after the code below).


import UIKit
import AVFoundation
import CoreML
import Vision

class ViewController: UIViewController {
  var player: AVPlayer!
  var videoOutput: AVPlayerItemVideoOutput?

  override func viewDidLoad() {
    super.viewDidLoad()

    // localURL is assumed to point at the local video file (defined elsewhere).
    let player = AVPlayer(url: localURL)
    player.play()

    player.currentItem?.addObserver(
      self,
      forKeyPath: #keyPath(AVPlayerItem.status),
      options: [.initial, .old, .new],
      context: nil)
    player.addPeriodicTimeObserver(
      forInterval: CMTime(value: 1, timescale: 30),
      queue: DispatchQueue(label: "videoProcessing", qos: .background),
      using: { [weak self] _ in
        self?.doThingsWithFaces()
    })
    self.player = player
  }

  override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
    guard let keyPath = keyPath, let item = object as? AVPlayerItem
      else { return }

    switch keyPath {
    case #keyPath(AVPlayerItem.status):
      if item.status == .readyToPlay {
        self.setUpOutput()
      }
      break
    default: break
    }
  }

  func setUpOutput() {
    guard self.videoOutput == nil else { return }
    let videoItem = player.currentItem!
    if videoItem.status != .readyToPlay {
      // see https://forums.developer.apple.com/thread/27589#128476
      return
    }

    let pixelBuffAttributes = [
      kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
      ] as [String: Any]

    let videoOutput = AVPlayerItemVideoOutput(pixelBufferAttributes: pixelBuffAttributes)
    videoItem.add(videoOutput)
    self.videoOutput = videoOutput
  }

  func getNewFrame() -> CVPixelBuffer? {
    guard let videoOutput = videoOutput, let currentItem = player.currentItem else { return nil }

    let time = currentItem.currentTime()
    if !videoOutput.hasNewPixelBuffer(forItemTime: time) { return nil }
    guard let buffer = videoOutput.copyPixelBuffer(forItemTime: time, itemTimeForDisplay: nil)
      else { return nil }
    return buffer
  }

  func doThingsWithFaces() {
    guard let buffer = getNewFrame() else { return }
    // Run your Vision / CoreML requests on this pixel buffer.
    // There are numerous examples of this; one possible sketch follows below.
  }
}
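
To fill in doThingsWithFaces(), here is a minimal sketch of per-frame face detection. It assumes the view controller also holds a VNSequenceRequestHandler and a transparent overlay view (boxView, a hypothetical name) laid directly over the player layer. VNDetectFaceRectanglesRequest returns bounding boxes normalized to the image with a lower-left origin, so they are flipped and scaled before drawing:

  // Assumed extra properties on ViewController:
  //   let sequenceHandler = VNSequenceRequestHandler()
  //   var boxView: UIView!   // transparent overlay on top of the player layer

  func doThingsWithFaces() {
    guard let buffer = getNewFrame() else { return }

    // Ask Vision for face bounding boxes in this frame.
    let request = VNDetectFaceRectanglesRequest { [weak self] request, error in
      guard let self = self, error == nil,
        let faces = request.results as? [VNFaceObservation] else { return }

      // Drawing must happen on the main thread.
      DispatchQueue.main.async {
        self.boxView.layer.sublayers?.forEach { $0.removeFromSuperlayer() }
        let size = self.boxView.bounds.size
        for face in faces {
          // Convert Vision's normalized, lower-left-origin rect
          // into UIKit coordinates of the overlay view.
          let box = face.boundingBox
          let rect = CGRect(
            x: box.origin.x * size.width,
            y: (1 - box.origin.y - box.height) * size.height,
            width: box.width * size.width,
            height: box.height * size.height)

          let faceLayer = CALayer()
          faceLayer.frame = rect
          faceLayer.borderColor = UIColor.red.cgColor
          faceLayer.borderWidth = 2
          self.boxView.layer.addSublayer(faceLayer)
        }
      }
    }

    // The sequence handler is reused across frames of the same video.
    try? sequenceHandler.perform([request], on: buffer)
  }

Note that this assumes boxView exactly covers the displayed video; if the video is letterboxed inside an AVPlayerLayer, map the rectangles through the layer's videoRect rather than the view bounds.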
answered 2017-06-28 22:49