8

我有一系列希望使用 MTKView 呈现的 UI 图像(由来自服务器的传入 jpeg 数据制成)。问题是它与 GLKView 相比太慢了。当我在 MTKView 中显示一系列图像但在 GLKView 中没有延迟时,会有很多缓冲和延迟。

这是MTKView显示代码:

 private lazy var context: CIContext = {
    // Core Image context backed by this view's Metal device.
    // Passing NSNull as the working color space turns off Core Image
    // color management inside the context.
    let options: [CIContextOption: Any] = [.workingColorSpace: NSNull()]
    return CIContext(mtlDevice: self.device!, options: options)
}()

 var ciImg: CIImage? {
    didSet {
        // Publish the newly assigned image to the render side under the
        // sync queue so draw(_:) never observes a half-written value.
        syncQueue.sync { internalCoreImage = ciImg }
    }
}

 func displayCoreImage(_ ciImage: CIImage) {
    // Public entry point: store the image; the property observer on `ciImg`
    // forwards it to the render path.
    ciImg = ciImage
}

  override func draw(_ rect: CGRect) {
     // Take a thread-safe snapshot of the most recently published image so the
     // producer thread (which writes `internalCoreImage` from `ciImg.didSet`)
     // cannot race with this render pass.
     var ciImage: CIImage?

     syncQueue.sync {
         ciImage = internalCoreImage
     }

     // Bug fix: render the snapshot captured under the queue. The original code
     // passed the `ciImg` property here, which bypasses the synchronization it
     // just performed and can race with concurrent writers.
     drawCIImage(ciImage)
  }

 func drawCIImage(_ ciImage:CIImage?) {
    // Renders the given CIImage into the view's current drawable.
    // Bail out if there is no image yet, no drawable is available this frame,
    // or the command queue cannot vend a command buffer.
    guard let image = ciImage,
        let currentDrawable = currentDrawable,
        let commandBuffer = commandQueue?.makeCommandBuffer()
        else {
            return
    }
    let currentTexture = currentDrawable.texture
    let drawingBounds = CGRect(origin: .zero, size: drawableSize)
    
    // Stretch the image to fill the drawable. NOTE(review): X and Y scales are
    // computed independently, so the aspect ratio is NOT preserved here —
    // unlike the GLKView path below, which center-crops. Confirm this is intended.
    let scaleX = drawableSize.width / image.extent.width
    let scaleY = drawableSize.height / image.extent.height
    let scaledImage = image.transformed(by: CGAffineTransform(scaleX: scaleX, y: scaleY))
    
    // NOTE(review): the context was created with workingColorSpace = NSNull()
    // (no color management), yet a device-RGB output color space is passed here,
    // presumably forcing a per-frame color-space conversion — a likely
    // contributor to the slowdown relative to the GLKView path. Verify.
    context.render(scaledImage, to: currentTexture, commandBuffer: commandBuffer, bounds: drawingBounds, colorSpace: CGColorSpaceCreateDeviceRGB())
   
    
    // Schedule presentation of the drawable, then submit the GPU work.
    commandBuffer.present(currentDrawable)
    commandBuffer.commit()
}

这是 GLKView 的代码,它没有延迟且快速:

// GLKView hosting the OpenGL ES drawable the preview is rendered into.
private var videoPreviewView:GLKView!
// OpenGL ES context shared by the GLKView and the CIContext below.
private var eaglContext:EAGLContext!
// Core Image context that renders CIImages directly into the GL drawable.
private var context:CIContext!

/// Programmatic initializer; funnels into the shared GL/Core Image setup.
override init(frame: CGRect) {
    super.init(frame: frame)
    initCommon()
}

/// Storyboard/XIB initializer; funnels into the shared GL/Core Image setup.
required init?(coder: NSCoder) {
    super.init(coder: coder)
    initCommon()
}

func initCommon() {
    // One-time GL + Core Image setup shared by both initializers.
    // Force unwrap: assumes an OpenGL ES 3 context can always be created on
    // the target devices — this would crash where ES3 is unavailable.
    eaglContext = EAGLContext(api: .openGLES3)!
    videoPreviewView = GLKView(frame: self.bounds, context: eaglContext)
    // CIContext built on the same EAGL context, so Core Image can render
    // straight into the GLKView's drawable.
    context = CIContext(eaglContext: eaglContext, options: nil)
    
    self.addSubview(videoPreviewView)
    
    videoPreviewView.bindDrawable()
    videoPreviewView.clipsToBounds = true
    videoPreviewView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
}

 func displayCoreImage(_ ciImage: CIImage) {
    // Renders `ciImage` into the GLKView: computes a center-cropped source rect
    // and a letterboxed destination rect, clears to black, draws via the
    // CIContext, and presents the drawable.
    let sourceExtent = ciImage.extent
       
    let sourceAspect = sourceExtent.size.width / sourceExtent.size.height
    
    // Drawable dimensions are in pixels (not points).
    let videoPreviewWidth = CGFloat(videoPreviewView.drawableWidth)
    let videoPreviewHeight = CGFloat(videoPreviewView.drawableHeight)
    
    let previewAspect = videoPreviewWidth/videoPreviewHeight
       
       // we want to maintain the aspect ratio of the screen size, so we clip the video image
    var drawRect = sourceExtent
       
    if sourceAspect > previewAspect
    {
        // use full height of the video image, and center crop the width
        drawRect.origin.x = drawRect.origin.x + (drawRect.size.width - drawRect.size.height * previewAspect) / 2.0
        drawRect.size.width = drawRect.size.height * previewAspect
    }
    else
    {
        // use full width of the video image, and center crop the height
        drawRect.origin.y = drawRect.origin.y + (drawRect.size.height - drawRect.size.width / previewAspect) / 2.0
        drawRect.size.height = drawRect.size.width / previewAspect
    }
       
    // Destination rect in the drawable, shrunk to match the source aspect.
    var videoRect = CGRect(x: 0, y: 0, width: videoPreviewWidth, height: videoPreviewHeight)
    
    if sourceAspect < previewAspect
       {
           // use full height of the video image, and center crop the width
           videoRect.origin.x += (videoRect.size.width - videoRect.size.height * sourceAspect) / 2.0;
           videoRect.size.width = videoRect.size.height * sourceAspect;
       }
       else
       {
           // use full width of the video image, and center crop the height
           videoRect.origin.y += (videoRect.size.height - videoRect.size.width / sourceAspect) / 2.0;
           videoRect.size.height = videoRect.size.width / sourceAspect;
       }
       
    videoPreviewView.bindDrawable()
       
    // Make the EAGL context current before issuing any GL calls.
    if eaglContext != EAGLContext.current() {
        EAGLContext.setCurrent(eaglContext)
    }
       
    // clear eagl view to black
    glClearColor(0, 0, 0, 1)
    glClear(GLbitfield(GL_COLOR_BUFFER_BIT))
       
    // Premultiplied-alpha blending for the Core Image draw.
    glEnable(GLenum(GL_BLEND))
    glBlendFunc(GLenum(GL_ONE), GLenum(GL_ONE_MINUS_SRC_ALPHA))
           
    // NOTE(review): `drawRect` (the cropped source rect) is computed above but
    // never used — `sourceExtent` is passed as `from:` here. Confirm whether
    // the crop was meant to be applied (i.e. `from: drawRect`).
    context.draw(ciImage, in: videoRect, from: sourceExtent)
    videoPreviewView.display()
}

我真的很想找出金属代码的瓶颈在哪里。Metal 不能每秒显示 20 次 640x360 UIImages 吗?

编辑:将 MTKView 的 colorPixelFormat 设置为 rgba16Float 解决了延迟问题,但再现的颜色不准确。所以这看起来像是 Core Image 的色彩空间转换问题。但是 GLKView 是如何做到渲染得这么快、没有延迟,而 MTKView 却做不到的呢?

EDIT2:将 MTKView 的 colorPixelFormat 设置为 bgra_xr10 主要解决了延迟问题。但问题是我们不能使用这种像素颜色格式的 CIRenderDestination API。

仍然想知道 GLKView/CIContext 如何如此快速地渲染图像而没有任何延迟,而在 MTKView 中,我们却需要将 colorPixelFormat 设置为 bgra_xr10 才能提高性能。此外,在 iPad Mini 2 上设置 bgra_xr10 会导致崩溃:

  -[MTLRenderPipelineDescriptorInternal validateWithDevice:], line 2590: error 'pixelFormat, for color render target(0), is not a valid MTLPixelFormat.
4

0 回答 0