目标是使用 Swift 在设备上捕获全屏视频。在下面的代码中,视频捕获似乎发生在全屏(录制相机预览时使用全屏),但视频的渲染发生在不同的分辨率。具体来说,对于 iPhone 5S,看起来捕获发生在 320x568,但渲染发生在 320x480。
如何捕获和渲染全屏视频?
视频捕获代码:
/// Configures the shared PBJVision instance for portrait video recording
/// with the back camera, and installs a tap gesture to start/pause recording.
private func initPBJVision() {
    let camera = PBJVision.sharedInstance()

    // Delegate + capture configuration (portrait video, back camera,
    // continuous autofocus, session-preset output format).
    camera.delegate = self
    camera.cameraMode = PBJCameraMode.Video
    camera.cameraOrientation = PBJCameraOrientation.Portrait
    camera.focusMode = PBJFocusMode.ContinuousAutoFocus
    camera.outputFormat = PBJOutputFormat.Preset
    camera.cameraDevice = PBJCameraDevice.Back

    // Taps toggle recording; the selector string must match the handler method.
    let recordingToggleTap = UITapGestureRecognizer(target: self, action: "doTap:")
    view.addGestureRecognizer(recordingToggleTap)

    print("Configured PBJVision")
}
/// Attaches PBJVision's camera preview layer to this controller's view,
/// sized to fill the entire screen.
private func startCameraPreview() {
// Store PBJVision in var for convenience
let vision = PBJVision.sharedInstance()
// Connect PBJVision camera preview to <videoView>
// -- Get preview width
// Full-screen dimensions of this view (e.g. 320x568 points on an iPhone 5s).
let deviceWidth = CGRectGetWidth(view.frame)
let deviceHeight = CGRectGetHeight(view.frame)
// -- Configure PBJVision's preview layer
let previewLayer = vision.previewLayer
previewLayer.frame = CGRectMake(0, 0, deviceWidth, deviceHeight)
// NOTE(review): AspectFill scales and CROPS the preview to fill the layer,
// so the on-screen image looks full screen even when the capture session
// actually records at a different aspect ratio (e.g. a 4:3 preset).
// This is presumably why the preview (320x568) and the rendered file
// (320x480) disagree — verify which AVCaptureSession preset backs
// PBJOutputFormat.Preset before adjusting the export side.
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
...
}
视频渲染代码:
/// Loads the recorded movie at `fileUrl` into an AVMutableComposition and
/// prepares an AVMutableVideoComposition (used further down, beyond this
/// excerpt, to overlay text and export).
/// NOTE(review): the render size is read from the composition track's
/// `naturalSize` below — if that already reports 320x480 while the preview
/// was 320x568, the file was *captured* at a non-screen aspect ratio;
/// changing the render size here cannot recover pixels that were never
/// recorded. Confirm the capture preset first.
func exportVideo(fileUrl: NSURL) {
// Create main composition object
let videoAsset = AVURLAsset(URL: fileUrl, options: nil)
let mainComposition = AVMutableComposition()
let compositionVideoTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
// -- Extract and apply video & audio tracks to composition
// NOTE(review): indexing [0] crashes if the asset has no video/audio track
// (e.g. a recording aborted before any frames were written) — consider
// guarding with tracksWithMediaType(...).first before shipping.
let sourceVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
let sourceAudioTrack = videoAsset.tracksWithMediaType(AVMediaTypeAudio)[0]
// Copy the full duration of the source video track into the composition.
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: sourceVideoTrack, atTime: kCMTimeZero)
} catch {
print("Error with insertTimeRange. Video error: \(error).")
}
// Copy the full duration of the source audio track into the composition.
do {
try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: sourceAudioTrack, atTime: kCMTimeZero)
} catch {
print("Error with insertTimeRange. Audio error: \(error).")
}
// Add text to video
// -- Create video composition object
// NOTE(review): `naturalSize` is the track's encoded pixel size and ignores
// `preferredTransform` — for portrait captures the width/height are
// typically swapped (e.g. 480x320 encoded, rotated for display). If the
// goal is a screen-aspect render, this value presumably needs to be
// derived from the source track's naturalSize combined with its
// preferredTransform — verify against the recorded file's actual track.
let renderSize = compositionVideoTrack.naturalSize
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = renderSize
videoComposition.frameDuration = CMTimeMake(Int64(1), Int32(videoFrameRate))
// -- Add instruction to video composition object
// Single instruction spanning the whole asset; the layer instruction applies
// no transform, so frames are rendered exactly as stored in the track.
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack)
instruction.layerInstructions = [videoLayerInstruction]
videoComposition.instructions = [instruction]
// -- Define video frame
let videoFrame = CGRectMake(0, 0, renderSize.width, renderSize.height)
print("Video Frame: \(videoFrame)") // <-- Prints frame of 320x480 so render size already wrong here
...