14

I'm currently testing an ARKit/SceneKit implementation. The basic rendering to the screen more or less works, so I'd like to try recording what I see on screen to a video file.

For recording just the SceneKit content, I found this Gist:

//
//  ViewController.swift
//  SceneKitToVideo
//
//  Created by Lacy Rhoades on 11/29/16.
//  Copyright © 2016 Lacy Rhoades. All rights reserved.
//

import UIKit
import SceneKit
import GPUImage
import Photos

class ViewController: UIViewController {

    // Renders a scene (and shows it on the screen)
    var scnView: SCNView!

    // Another renderer
    var secondaryRenderer: SCNRenderer?

    // Abducts image data via an OpenGL texture
    var textureInput: GPUImageTextureInput?

    // Receives image data from textureInput, shows it on screen
    var gpuImageView: GPUImageView!

    // Receives image data from the textureInput, writes to a file
    var movieWriter: GPUImageMovieWriter?

    // Where to write the output file
    let path = NSTemporaryDirectory().appending("tmp.mp4")

    // Output file dimensions
    let videoSize = CGSize(width: 800.0, height: 600.0)

    // EAGLContext in the sharegroup with GPUImage
    var eaglContext: EAGLContext!

    override func viewDidLoad() {
        super.viewDidLoad()

        let group = GPUImageContext.sharedImageProcessing().context.sharegroup
        self.eaglContext = EAGLContext(api: .openGLES2, sharegroup: group )
        let options = ["preferredRenderingAPI": SCNRenderingAPI.openGLES2]

        // Main view with 3d in it
        self.scnView = SCNView(frame: CGRect.zero, options: options)
        self.scnView.preferredFramesPerSecond = 60
        self.scnView.eaglContext = eaglContext
        self.scnView.translatesAutoresizingMaskIntoConstraints = false
        self.view.addSubview(self.scnView)

        // Secondary renderer for rendering to an OpenGL framebuffer
        self.secondaryRenderer = SCNRenderer(context: eaglContext, options: options)

        // Output of the GPUImage pipeline
        self.gpuImageView = GPUImageView()
        self.gpuImageView.translatesAutoresizingMaskIntoConstraints = false
        self.view.addSubview(self.gpuImageView)

        self.setupConstraints()

        self.setupScene()

        self.setupMovieWriter()

        DispatchQueue.main.async {
            self.setupOpenGL()
        }
    }

    func setupConstraints() {
        let relativeWidth: CGFloat = 0.8

        self.view.addConstraint(NSLayoutConstraint(item: self.scnView, attribute: .width, relatedBy: .equal, toItem: self.view, attribute: .width, multiplier: relativeWidth, constant: 0))
        self.view.addConstraint(NSLayoutConstraint(item: self.scnView, attribute: .centerX, relatedBy: .equal, toItem: self.view, attribute: .centerX, multiplier: 1, constant: 0))

        self.view.addConstraint(NSLayoutConstraint(item: self.gpuImageView, attribute: .width, relatedBy: .equal, toItem: self.view, attribute: .width, multiplier: relativeWidth, constant: 0))
        self.view.addConstraint(NSLayoutConstraint(item: self.gpuImageView, attribute: .centerX, relatedBy: .equal, toItem: self.view, attribute: .centerX, multiplier: 1, constant: 0))

        self.view.addConstraints(NSLayoutConstraint.constraints(withVisualFormat: "V:|-(==30.0)-[scnView]-(==30.0)-[gpuImageView]", options: [], metrics: [:], views: ["gpuImageView": gpuImageView, "scnView": scnView]))

        let videoRatio = self.videoSize.width / self.videoSize.height
        self.view.addConstraint(NSLayoutConstraint(item: self.scnView, attribute: .width, relatedBy: .equal, toItem: self.scnView, attribute: .height, multiplier: videoRatio, constant: 1))
        self.view.addConstraint(NSLayoutConstraint(item: self.gpuImageView, attribute: .width, relatedBy: .equal, toItem: self.gpuImageView, attribute: .height, multiplier: videoRatio, constant: 1))
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)

        self.cameraBoxNode.runAction(
            SCNAction.repeatForever(
                SCNAction.rotateBy(x: 0.0, y: -2 * CGFloat.pi, z: 0.0, duration: 8.0)
            )
        )

        self.scnView.isPlaying = true

        Timer.scheduledTimer(withTimeInterval: 5.0, repeats: false, block: {
            timer in
            self.startRecording()
        })
    }

    var scene: SCNScene!
    var geometryNode: SCNNode!
    var cameraNode: SCNNode!
    var cameraBoxNode: SCNNode!
    var imageMaterial: SCNMaterial!
    func setupScene() {
        self.imageMaterial = SCNMaterial()
        self.imageMaterial.isDoubleSided = true
        self.imageMaterial.diffuse.contentsTransform = SCNMatrix4MakeScale(-1, 1, 1)
        self.imageMaterial.diffuse.wrapS = .repeat
        self.imageMaterial.diffuse.contents = UIImage(named: "pano_01")

        self.scene = SCNScene()

        let sphere = SCNSphere(radius: 100.0)
        sphere.materials = [imageMaterial!]
        self.geometryNode = SCNNode(geometry: sphere)
        self.geometryNode.position = SCNVector3Make(0.0, 0.0, 0.0)
        scene.rootNode.addChildNode(self.geometryNode)

        self.cameraNode = SCNNode()
        self.cameraNode.camera = SCNCamera()
        self.cameraNode.camera?.yFov = 72.0
        self.cameraNode.position = SCNVector3Make(0, 0, 0)
        self.cameraNode.eulerAngles = SCNVector3Make(0.0, 0.0, 0.0)

        self.cameraBoxNode = SCNNode()
        self.cameraBoxNode.addChildNode(self.cameraNode)
        scene.rootNode.addChildNode(self.cameraBoxNode)

        self.scnView.scene = scene
        self.scnView.backgroundColor = UIColor.darkGray
        self.scnView.autoenablesDefaultLighting = true
    }

    func setupMovieWriter() {
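        // FileUtil is a small helper from the original project (not shown here);
        // presumably it creates the output directory and removes any existing file.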
        let _ = FileUtil.mkdirUsingFile(path)
        let _ = FileUtil.unlink(path)
        let url = URL(fileURLWithPath: path)
        self.movieWriter = GPUImageMovieWriter(movieURL: url, size: self.videoSize)
    }

    let glRenderQueue = GPUImageContext.sharedContextQueue()!
    var outputTexture: GLuint = 0
    var outputFramebuffer: GLuint = 0
    func setupOpenGL() {
        self.glRenderQueue.sync {
            let context = EAGLContext.current()
            if context != self.eaglContext {
                EAGLContext.setCurrent(self.eaglContext)
            }

            glGenFramebuffers(1, &self.outputFramebuffer)
            glBindFramebuffer(GLenum(GL_FRAMEBUFFER), self.outputFramebuffer)

            glGenTextures(1, &self.outputTexture)
            glBindTexture(GLenum(GL_TEXTURE_2D), self.outputTexture)
        }

        // Pipe the texture into GPUImage-land
        self.textureInput = GPUImageTextureInput(texture: self.outputTexture, size: self.videoSize)

        let rotate = GPUImageFilter()
        rotate.setInputRotation(kGPUImageFlipVertical, at: 0)
        self.textureInput?.addTarget(rotate)
        rotate.addTarget(self.gpuImageView)

        if let writer = self.movieWriter {
            rotate.addTarget(writer)
        }

        // Call me back on every render
        self.scnView.delegate = self
    }

    func renderToFramebuffer(atTime time: TimeInterval) {
        self.glRenderQueue.sync {
            let context = EAGLContext.current()
            if context != self.eaglContext {
                EAGLContext.setCurrent(self.eaglContext)
            }

            objc_sync_enter(self.eaglContext)

            let width = GLsizei(self.videoSize.width)
            let height = GLsizei(self.videoSize.height)

            glBindFramebuffer(GLenum(GL_FRAMEBUFFER), self.outputFramebuffer)
            glBindTexture(GLenum(GL_TEXTURE_2D), self.outputTexture)

            glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGBA, width, height, 0, GLenum(GL_RGBA), GLenum(GL_UNSIGNED_BYTE), nil)

            glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAG_FILTER), GL_LINEAR)
            glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_LINEAR)
            glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_CLAMP_TO_EDGE)
            glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_CLAMP_TO_EDGE)

            glFramebufferTexture2D(GLenum(GL_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0), GLenum(GL_TEXTURE_2D), self.outputTexture, 0)

            glViewport(0, 0, width, height)

            glClear(GLbitfield(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT))

            self.secondaryRenderer?.render(atTime: time)

            self.videoBuildingQueue.sync {
                self.textureInput?.processTexture(withFrameTime: CMTime(seconds: time, preferredTimescale: 100000))
            }

            objc_sync_exit(self.eaglContext)
        }

    }

    func startRecording() {
        self.startRecord()
        Timer.scheduledTimer(withTimeInterval: 24.0, repeats: false, block: {
            timer in
            self.stopRecord()
        })
    }

    let videoBuildingQueue = DispatchQueue.global(qos: .default)

    func startRecord() {
        self.videoBuildingQueue.sync {
            //inOrientation: CGAffineTransform(scaleX: 1.0, y: -1.0)
            self.movieWriter?.startRecording()
        }
    }

    var renderStartTime: TimeInterval = 0

    func stopRecord() {
        self.videoBuildingQueue.sync {
            self.movieWriter?.finishRecording(completionHandler: {
                self.saveFileToCameraRoll()
            })
        }
    }

    func saveFileToCameraRoll() {
        assert(FileUtil.fileExists(self.path), "Check for file output")

        DispatchQueue.global(qos: .utility).async {
            PHPhotoLibrary.shared().performChanges({
                PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: URL(fileURLWithPath: self.path))
            }) { (done, err) in
                if err != nil {
                    print("Error creating video file in library")
                    print(err.debugDescription)
                } else {
                    print("Done writing asset to the user's photo library")
                }
            }
        }
    }

}

extension ViewController: SCNSceneRendererDelegate {
    func renderer(_ renderer: SCNSceneRenderer, didRenderScene scene: SCNScene, atTime time: TimeInterval) {
        self.secondaryRenderer?.scene = scene
        self.secondaryRenderer?.pointOfView = (renderer as! SCNView).pointOfView
        self.renderToFramebuffer(atTime: time)
    }
}

But this doesn't render the image coming from the device camera.

So I also started looking for a way to do that. So far I've found that I can get the captured image as a CVImageBufferRef by accessing the ARFrame. Apple's GLCameraRipple sample looks like it would help me get an OpenGL texture from that.
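
Roughly, getting the texture out of the frame seems to look like this (just a sketch on my part, in the style of GLCameraRipple; textureCache stands for a CVOpenGLESTextureCache created with the same EAGLContext used for rendering):

import ARKit
import CoreVideo
import OpenGLES

// Sketch only: pull the camera image out of the current ARFrame and wrap its
// luminance (Y) plane in an OpenGL texture, GLCameraRipple-style.
func cameraLumaTexture(from session: ARSession,
                       textureCache: CVOpenGLESTextureCache) -> CVOpenGLESTexture? {
    guard let pixelBuffer = session.currentFrame?.capturedImage else { return nil }

    // The captured image is bi-planar YCbCr; plane 0 is luminance, plane 1 is chroma.
    var texture: CVOpenGLESTexture?
    let status = CVOpenGLESTextureCacheCreateTextureFromImage(
        kCFAllocatorDefault, textureCache, pixelBuffer, nil,
        GLenum(GL_TEXTURE_2D), GL_LUMINANCE,
        GLsizei(CVPixelBufferGetWidthOfPlane(pixelBuffer, 0)),
        GLsizei(CVPixelBufferGetHeightOfPlane(pixelBuffer, 0)),
        GLenum(GL_LUMINANCE), GLenum(GL_UNSIGNED_BYTE),
        0, &texture)
    return status == kCVReturnSuccess ? texture : nil
}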

But my problem is how to draw it in the render loop. This may be obvious to someone with OpenGL experience, but I know very little OpenGL, so I can't figure out how to add it to the code above.


6 Answers

5

You can record everything you see on screen, including the ARKit and SceneKit content, with ReplayKit (or live-stream it to services like Twitch).

(As Apple noted at WWDC, ReplayKit is in fact what underlies the Control Center screen-recording feature in iOS 11.)
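
In its simplest form that's only a few lines (a rough sketch, independent of your scene setup):

import UIKit
import ReplayKit

// Rough sketch: ReplayKit records the whole screen, ARKit/SceneKit content included.
func startScreenRecording() {
    RPScreenRecorder.shared().startRecording { error in
        if let error = error {
            print("ReplayKit failed to start: \(error)")
        }
    }
}

func stopScreenRecording(presentingFrom viewController: UIViewController) {
    RPScreenRecorder.shared().stopRecording { previewController, error in
        // The preview controller lets the user trim, save, or share the recording.
        if let previewController = previewController {
            viewController.present(previewController, animated: true)
        }
    }
}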

answered 2017-06-28T04:14:32.517
2

Swift 5

You can use the ARCapture framework to record video from an ARKit view:

private var capture: ARCapture?
...

override func viewDidLoad() {
    super.viewDidLoad()

    // Create a new scene
    let scene = SCNScene()
    ...
    // TODO Setup ARSCNView with the scene
    // sceneView.scene = scene
    
    // Setup ARCapture
    capture = ARCapture(view: sceneView)

}

/// "Record" button action handler
@IBAction func recordAction(_ sender: UIButton) {
    capture?.start()
}

/// "Stop" button action handler
@IBAction func stopAction(_ sender: UIButton) {
    capture?.stop({ (status) in
        print("Video exported: \(status)")
    })
}

After calling ARCapture.stop, the video will appear in the Photos app.

answered 2021-07-05T07:44:50.187
1

I don't know whether you've managed to answer this by now, but lacyrhoades, the author of the class you referenced, has published another project on GitHub that seems to do what you want. I've used it, and it manages to record the SceneView with the AR objects along with the camera input. You can find it at the link below:

https://github.com/lacyrhoades/SCNKit2Video

However, if you want to use it with AR you'll have to wire an ARSCNView into the project he made, because as published it only runs a plain SCNView, not an AR one (roughly like the sketch below).
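
Something along these lines (just a sketch with assumed names; the project's offscreen SCNRenderer would then take its scene and pointOfView from the ARSCNView instead):

import UIKit
import ARKit

// Sketch: swap the plain SCNView for an ARSCNView and run a world-tracking
// session so the camera feed is rendered behind the scene content.
func makeARView(showing scene: SCNScene, in container: UIView) -> ARSCNView {
    let arView = ARSCNView(frame: container.bounds)
    arView.scene = scene
    arView.session.run(ARWorldTrackingConfiguration())
    container.addSubview(arView)
    return arView
}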

Hope that helps.

answered 2017-07-24T11:00:16.857
1

I just found a framework called ARVideoKit. It seems easy to implement, and it has more features such as capturing GIFs and Live Photos.

The framework's official repo is: https://github.com/AFathi/ARVideoKit/

To install it, you have to clone the repo and drag the .framework file into your project's embedded binaries.

Then the implementation is very simple (a consolidated sketch follows the steps below):

  1. import ARVideoKit in your UIViewController class

  2. Create a RecordAR? variable

    var videoRec:RecordAR?

  3. Initialize your variable in viewDidLoad

    videoRec = RecordAR(ARSceneKit: sceneView)

  4. Prepare RecordAR in viewWillAppear

    videoRec?.prepare(configuration)

  5. Start recording the video

    videoRec?.record()

  6. Stop and export to the camera roll!

    videoRec?.stopAndExport()
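
Put together, a minimal view controller might look something like this (my own consolidation of the steps above; the sceneView outlet and the ARWorldTrackingConfiguration are assumptions):

import UIKit
import ARKit
import ARVideoKit

class RecordingViewController: UIViewController {

    @IBOutlet var sceneView: ARSCNView!              // assumed outlet name
    var videoRec: RecordAR?
    let configuration = ARWorldTrackingConfiguration()

    override func viewDidLoad() {
        super.viewDidLoad()
        videoRec = RecordAR(ARSceneKit: sceneView)   // step 3
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        sceneView.session.run(configuration)
        videoRec?.prepare(configuration)             // step 4
    }

    @IBAction func recordAction(_ sender: UIButton) {
        videoRec?.record()                           // step 5
    }

    @IBAction func stopAction(_ sender: UIButton) {
        videoRec?.stopAndExport()                    // step 6: saves to the camera roll
    }
}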

Take a look at the framework's documentation; it supports many more features!

You can find their documentation here: https://github.com/AFathi/ARVideoKit/wiki

Hope it helps!

answered 2017-11-06T04:25:50.277
1

ReplayKit is not a good solution, because your users will see an ugly permission dialog, and you also have to work around the fact that it records your UI elements. You also have less control over the video resolution.

Instead, you should take the captured frame that ARKit returns as a CVPixelBuffer and process it the same way you would process frames captured from the camera. Assuming you need to process the video frames, you'll probably also need a framework like Metal to handle the drawing. It's not simple. See the answer given here: How do you record video in RealityKit?
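
To give an idea of the shape of that approach (a very rough sketch; the AVAssetWriter setup and the compositing of your rendered content are omitted, and the names are placeholders):

import ARKit
import AVFoundation
import CoreMedia

// Placeholder sketch: receive each ARFrame and hand its pixel buffer to an
// AVAssetWriter pipeline. Note that capturedImage is the camera feed only; your
// rendered AR content has to be composited separately (e.g. with Metal), as in
// the linked RealityKit answer.
class FrameRecorder: NSObject, ARSessionDelegate {

    // Assumed to be configured elsewhere against an AVAssetWriterInput set up for video.
    var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?

    func session(_ session: ARSession, didUpdate frame: ARFrame) {
        guard let adaptor = pixelBufferAdaptor,
              adaptor.assetWriterInput.isReadyForMoreMediaData else { return }

        let time = CMTime(seconds: frame.timestamp, preferredTimescale: 600)
        _ = adaptor.append(frame.capturedImage, withPresentationTime: time)
    }
}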

answered 2020-07-17T08:31:03.707
-5

If you can keep your device connected to a Mac, it's really easy to record the screen (and sound) from your iOS device using QuickTime Player.

Choose New Movie Recording from QuickTime's File menu. Then, in the recording dialog, there's a small drop-down arrow next to the big red record button where you can choose the audio and video input. Select your iDevice there and you're good to go.

answered 2017-06-28T09:00:49.347