
Here is my code:

import UIKit
import AVFoundation

class ViewController: UIViewController {

    @IBOutlet weak var cameraView: UIView!

    var image: UIImage!

    var captureSession = AVCaptureSession()
    var backCamera: AVCaptureDevice?
    var frontCamera: AVCaptureDevice?
    var currentCamera: AVCaptureDevice?

    var photoOutput: AVCapturePhotoOutput?

    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.


    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)

        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        startRunningCaptureSession()

    }

    @IBAction func cameraButton_Tab(_ sender: Any) {

        let settings = AVCapturePhotoSettings()

//        performSegue(withIdentifier: "showPhoto_Segue", sender: nil)
        photoOutput?.capturePhoto(with: settings, delegate: self)

    }

    func setupCaptureSession() {

        captureSession.sessionPreset = AVCaptureSession.Preset.photo

    }

    func setupDevice() {

        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)

        let devices = deviceDiscoverySession.devices

        for device in devices {

            if device.position == AVCaptureDevice.Position.back {

                backCamera = device

            } else if device.position == AVCaptureDevice.Position.front {

                frontCamera = device

            }

        }

        currentCamera = backCamera

    }

    func setupInputOutput() {

        do{

            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey : AVVideoCodecType.jpeg])], completionHandler: nil)
            captureSession.addOutput(photoOutput!)

        }catch {
            print(error)
        }

    }

    func setupPreviewLayer() {

        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer!.frame = self.cameraView.bounds
        self.cameraView.layer.insertSublayer(cameraPreviewLayer!, at: 0)

    }

    func startRunningCaptureSession() {

        captureSession.startRunning()

    }

}

extension ViewController: AVCapturePhotoCaptureDelegate {

    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation(){
            image = UIImage(data: imageData)

        }
    }

}

[Screenshot: the camera preview visible through a square view with a yellow background]

Looking at the image, I want to save only the part with the yellow background, through which the camera preview is visible.

But when I save the image, the whole camera capture seems to be saved, not just the square.

I made the UIImageView the same size as the yellow UIView and saved the output; it takes the entire capture and resizes it to fit.

It's like squeezing a rectangle into a square.

How can I capture and save only the area the size of the yellow background?

1 Answer

didFinishProcessingPhoto returns the full image, exactly as the camera sees it, not just the part shown in your previewLayer. So, to get a UIImage matching what the preview layer displays, you can resize the captured image.

Resizing can be done in two ways: keeping the aspect ratio, or forcing an exact target size. I suggest keeping the aspect ratio, because it guarantees the image is never squeezed or stretched in either dimension, whereas passing the wrong exact size will not give you what you want. For example, forcing a 3024×4032 portrait capture into 300×300 squashes it vertically, while scaling it to a width of 300 yields 300×400.

Resize a UIImage by passing a new CGSize:

extension UIImage {
    func scaleImage(toSize newSize: CGSize) -> UIImage? {
        var newImage: UIImage?
        let newRect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height).integral
        UIGraphicsBeginImageContextWithOptions(newSize, false, 0)
        // Always balance the Begin call, even if drawing fails below.
        defer { UIGraphicsEndImageContext() }
        if let context = UIGraphicsGetCurrentContext(), let cgImage = self.cgImage {
            context.interpolationQuality = .high
            // Core Graphics draws with a bottom-left origin; flip vertically
            // so the image is not rendered upside down.
            let flipVertical = CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: newSize.height)
            context.concatenate(flipVertical)
            context.draw(cgImage, in: newRect)
            if let img = context.makeImage() {
                newImage = UIImage(cgImage: img)
            }
        }
        return newImage
    }
}

Usage: capturedImage.scaleImage(toSize: CGSize(width: 300, height: 300))
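A note on this version: passing 0 as the scale argument to UIGraphicsBeginImageContextWithOptions makes the bitmap context use the device's screen scale, and .integral rounds the target rect to whole-point values.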

Resize a UIImage keeping the aspect ratio:

extension UIImage {
    func scaleImage(toWidth newWidth: CGFloat) -> UIImage {
        // Derive the new height from the original aspect ratio.
        let scale = newWidth / self.size.width
        let newHeight = self.size.height * scale
        let newSize = CGSize(width: newWidth, height: newHeight)

        let renderer = UIGraphicsImageRenderer(size: newSize)

        return renderer.image { _ in
            self.draw(in: CGRect(origin: .zero, size: newSize))
        }
    }
}

Usage: capturedImage.scaleImage(toWidth: 300)
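This version uses UIGraphicsImageRenderer, the more modern API: drawing goes through UIImage.draw(in:), which already works in UIKit's top-left coordinate system, so no manual flip is needed, and the renderer opens and closes its context automatically.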

Reference: Resize a UIImage to 200x200 pt/px

Update:

Keep the following in your code:

@IBAction func cameraButton_Tab(_ sender: Any) {
    let settings = AVCapturePhotoSettings()
    photoOutput?.capturePhoto(with: settings, delegate: self)
}

extension ViewController: AVCapturePhotoCaptureDelegate {

    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation(),
           let capturedImage = UIImage(data: imageData) {
            // Scale the capture to the width of the camera preview layer.
            let cropImage = capturedImage.scaleImage(toWidth: cameraPreviewLayer!.frame.size.width)
            image = cropImage
        }
    }
}
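Note that scaleImage(toWidth:) only scales the full capture down; because the preview layer aspect-fills a square view, the photo must also be cropped to match the yellow square exactly. Below is a minimal sketch of such a crop, assuming the cameraPreviewLayer from the question's code; cropToPreview is a hypothetical helper, not part of the original answer.

// Sketch (assumption, not from the original answer): crop the captured
// photo to the region the aspect-filled preview layer actually shows.
func cropToPreview(_ capturedImage: UIImage) -> UIImage? {
    guard let previewLayer = cameraPreviewLayer,
          let cgImage = capturedImage.cgImage else { return nil }

    // Normalized (0...1) rect, in capture-device coordinates, of what
    // the preview layer displays inside its bounds.
    let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)

    // The CGImage is stored in sensor orientation, which matches the
    // metadata-output coordinate space, so the rect scales directly.
    let width = CGFloat(cgImage.width)
    let height = CGFloat(cgImage.height)
    let cropRect = CGRect(x: outputRect.origin.x * width,
                          y: outputRect.origin.y * height,
                          width: outputRect.width * width,
                          height: outputRect.height * height)

    guard let cropped = cgImage.cropping(to: cropRect) else { return nil }
    // Preserve the original orientation so the result displays upright.
    return UIImage(cgImage: cropped, scale: capturedImage.scale, orientation: capturedImage.imageOrientation)
}

Calling image = cropToPreview(capturedImage) from the delegate should then give a square image matching the yellow view, which can be saved with UIImageWriteToSavedPhotosAlbum if desired.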
answered 2019-01-25 at 04:42