CIImage display MTKView vs GLKView performance

Posted 2020-02-06 03:12

Question:

I have a series of UIImages (made from incoming JPEG data from a server) that I wish to render using MTKView. The problem is that it is too slow compared to GLKView: there is a lot of buffering and delay when I display a series of images in the MTKView, but no delay at all with the GLKView.
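
For context, each frame is assumed to arrive as raw JPEG Data and is decoded before being handed to the view. A minimal sketch of that step is below; the function name is a placeholder, and only CIImage(data:), UIImage(data:) and CIImage(image:) are real initializers:

import UIKit
import CoreImage

// Hypothetical decode step: turn the server's JPEG Data into a CIImage for display.
// Decoding directly with CIImage(data:) avoids the intermediate UIImage, but the
// UIImage route works as well.
func ciImage(fromIncomingJPEG data: Data) -> CIImage? {
    if let direct = CIImage(data: data) {
        return direct
    }
    // Fall back to UIKit decoding if Core Image cannot parse the data directly.
    guard let uiImage = UIImage(data: data) else { return nil }
    return CIImage(image: uiImage)
}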

Here is the MTKView display code:

private lazy var context: CIContext = {
    return CIContext(mtlDevice: self.device!, options: [CIContextOption.workingColorSpace : NSNull()])
}()

var ciImg: CIImage? {
    didSet {
        syncQueue.sync {
            internalCoreImage = ciImg
        }
    }
}

func displayCoreImage(_ ciImage: CIImage) {
    self.ciImg = ciImage
}

override func draw(_ rect: CGRect) {
    var ciImage: CIImage?

    syncQueue.sync {
        ciImage = internalCoreImage
    }

    // Draw the snapshot read under the lock rather than the unsynchronized property.
    drawCIImage(ciImage)
}

func drawCIImage(_ ciImage: CIImage?) {
    guard let image = ciImage,
        let currentDrawable = currentDrawable,
        let commandBuffer = commandQueue?.makeCommandBuffer()
        else {
            return
    }
    let currentTexture = currentDrawable.texture
    let drawingBounds = CGRect(origin: .zero, size: drawableSize)

    let scaleX = drawableSize.width / image.extent.width
    let scaleY = drawableSize.height / image.extent.height
    let scaledImage = image.transformed(by: CGAffineTransform(scaleX: scaleX, y: scaleY))

    context.render(scaledImage, to: currentTexture, commandBuffer: commandBuffer, bounds: drawingBounds, colorSpace: CGColorSpaceCreateDeviceRGB())

    commandBuffer.present(currentDrawable)
    commandBuffer.commit()
}
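
The question does not show how the MTKView subclass itself is configured. For reference, here is a minimal sketch of the setup implied by the code above; the names commandQueue, syncQueue and internalCoreImage come from that code, while the class name and the concrete configuration values are assumptions:

import MetalKit
import CoreImage

// Sketch of the MTKView subclass setup implied by the drawing code above.
// The class name and configuration values are assumptions, not code from the project.
class MetalImageView: MTKView {
    let commandQueue: MTLCommandQueue?
    let syncQueue = DispatchQueue(label: "metal-image-view.sync") // hypothetical label
    var internalCoreImage: CIImage?

    override init(frame frameRect: CGRect, device: MTLDevice?) {
        let device = device ?? MTLCreateSystemDefaultDevice()
        commandQueue = device?.makeCommandQueue()
        super.init(frame: frameRect, device: device)
        framebufferOnly = false          // must be false so CIContext can write into the drawable's texture
        colorPixelFormat = .bgra8Unorm   // the default; the EDITs below experiment with other formats
        preferredFramesPerSecond = 60    // default draw-loop rate; frames arrive at ~20 fps
    }

    required init(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}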

And here is the GLKView code, which is lag-free and fast:

private var videoPreviewView:GLKView!
private var eaglContext:EAGLContext!
private var context:CIContext!

override init(frame: CGRect) {
    super.init(frame: frame)
    initCommon()
}

required init?(coder: NSCoder) {
    super.init(coder: coder)
    initCommon()
}

func initCommon() {
    eaglContext = EAGLContext(api: .openGLES3)!
    videoPreviewView = GLKView(frame: self.bounds, context: eaglContext)
    context = CIContext(eaglContext: eaglContext, options: nil)

    self.addSubview(videoPreviewView)

    videoPreviewView.bindDrawable()
    videoPreviewView.clipsToBounds = true
    videoPreviewView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
}

func displayCoreImage(_ ciImage: CIImage) {
    let sourceExtent = ciImage.extent

    let sourceAspect = sourceExtent.size.width / sourceExtent.size.height

    let videoPreviewWidth = CGFloat(videoPreviewView.drawableWidth)
    let videoPreviewHeight = CGFloat(videoPreviewView.drawableHeight)

    let previewAspect = videoPreviewWidth/videoPreviewHeight

    // we want to maintain the aspect ratio of the screen size, so we clip the video image
    var drawRect = sourceExtent

    if sourceAspect > previewAspect
    {
        // use full height of the video image, and center crop the width
        drawRect.origin.x = drawRect.origin.x + (drawRect.size.width - drawRect.size.height * previewAspect) / 2.0
        drawRect.size.width = drawRect.size.height * previewAspect
    }
    else
    {
        // use full width of the video image, and center crop the height
        drawRect.origin.y = drawRect.origin.y + (drawRect.size.height - drawRect.size.width / previewAspect) / 2.0
        drawRect.size.height = drawRect.size.width / previewAspect
    }

    var videoRect = CGRect(x: 0, y: 0, width: videoPreviewWidth, height: videoPreviewHeight)

    if sourceAspect < previewAspect
    {
        // use full height of the video image, and center crop the width
        videoRect.origin.x += (videoRect.size.width - videoRect.size.height * sourceAspect) / 2.0
        videoRect.size.width = videoRect.size.height * sourceAspect
    }
    else
    {
        // use full width of the video image, and center crop the height
        videoRect.origin.y += (videoRect.size.height - videoRect.size.width / sourceAspect) / 2.0
        videoRect.size.height = videoRect.size.width / sourceAspect
    }

    videoPreviewView.bindDrawable()

    if eaglContext != EAGLContext.current() {
        EAGLContext.setCurrent(eaglContext)
    }

    // clear eagl view to black
    glClearColor(0, 0, 0, 1)
    glClear(GLbitfield(GL_COLOR_BUFFER_BIT))

    glEnable(GLenum(GL_BLEND))
    glBlendFunc(GLenum(GL_ONE), GLenum(GL_ONE_MINUS_SRC_ALPHA))

    context.draw(ciImage, in: videoRect, from: sourceExtent)
    videoPreviewView.display()
}

I really want to find out where the bottleneck is in the Metal code. Is Metal not capable of displaying 640x360 images 20 times per second?
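
As a side note on draw cadence: by default an MTKView redraws on its own internal timer (60 fps) regardless of when new images arrive. The sketch below shows the alternative draw-on-demand mode using standard MTKView properties; whether this affects the delay observed here has not been verified:

import MetalKit

// Sketch (standard MTKView drawing-mode API, not code from the question): switch the
// view from its internal timer to on-demand drawing so draw(_:) runs only when a new
// frame has actually arrived (~20 times per second).
func enableOnDemandDrawing(for view: MTKView) {
    view.isPaused = true               // stop the internal display link
    view.enableSetNeedsDisplay = true  // redraw only when setNeedsDisplay() is called
}

// Then, whenever a new CIImage is stored, request a single redraw on the main thread:
//     DispatchQueue.main.async { view.setNeedsDisplay() }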

EDIT: Setting the colorPixelFormat of the MTKView to rgba16Float solves the delay issue, but the reproduced colors are not accurate, so it seems like a color space conversion issue in Core Image. But then how does GLKView render so quickly and without any delay when MTKView does not?
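
For reference, one way to pair an rgba16Float drawable with Core Image's color management is sketched below. This uses standard MTKView/CIContext API, but it is an assumption rather than code from the project, and whether it fixes the inaccurate colors is exactly what is in question:

import MetalKit
import CoreImage

// Sketch: pair an .rgba16Float drawable with a linear working color space instead of
// disabling color management with NSNull(). Only the property and option names below
// are real API; the helper function itself is hypothetical.
func configureForFloatDrawable(_ view: MTKView) -> CIContext? {
    guard let device = view.device,
          let linearSRGB = CGColorSpace(name: CGColorSpace.extendedLinearSRGB) else { return nil }
    view.colorPixelFormat = .rgba16Float
    return CIContext(mtlDevice: device, options: [
        .workingColorSpace: linearSRGB,                           // color-managed working space
        .workingFormat: NSNumber(value: CIFormat.RGBAh.rawValue)  // half-float working format to match the drawable
    ])
}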

EDIT2: Setting the colorPixelFormat of the MTKView to bgra_xr10 mostly solves the delay issue. But the problem is that we cannot use the CIRenderDestination API with that pixel format.
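
For context, the CIRenderDestination path referred to here looks roughly like the sketch below when it is usable (for example with an rgba16Float or bgra8Unorm drawable). This is based on the public API, not on code from the original project:

import MetalKit
import CoreImage

// Sketch of drawing through CIRenderDestination instead of CIContext.render(_:to:...).
// The helper itself is hypothetical; CIRenderDestination(width:height:pixelFormat:
// commandBuffer:mtlTextureProvider:) and startTask(toRender:to:) are real API.
func drawWithRenderDestination(_ image: CIImage,
                               in view: MTKView,
                               context: CIContext,
                               commandQueue: MTLCommandQueue) {
    guard let drawable = view.currentDrawable,
          let commandBuffer = commandQueue.makeCommandBuffer() else { return }

    let destination = CIRenderDestination(width: Int(view.drawableSize.width),
                                          height: Int(view.drawableSize.height),
                                          pixelFormat: view.colorPixelFormat,
                                          commandBuffer: commandBuffer,
                                          mtlTextureProvider: { drawable.texture })

    // Encode the Core Image work onto the command buffer, then present as before.
    _ = try? context.startTask(toRender: image, to: destination)

    commandBuffer.present(drawable)
    commandBuffer.commit()
}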

I am still wondering how GLKView/CIContext renders the images so quickly without any delay, while with MTKView we need to set the colorPixelFormat to bgra_xr10 to get comparable performance. And setting bgra_xr10 on an iPad Mini 2 causes a crash:

  -[MTLRenderPipelineDescriptorInternal validateWithDevice:], line 2590: error 'pixelFormat, for color render target(0), is not a valid MTLPixelFormat.