
I know this question has been answered many times, but my situation seems different. I'm trying to write a top-level function that lets me take a screenshot of my application at any time, whether the content is OpenGL ES or UIKit, and I won't have access to the underlying classes to make any changes.

The code I've been trying works for UIKit, but returns a black screen for the OpenGL ES parts:

#import <QuartzCore/QuartzCore.h>
#import <OpenGLES/ES1/gl.h>
#import <OpenGLES/ES1/glext.h>

// (method name assumed; the original snippet began mid-method)
- (UIImage *)screenshot
{
    CGSize imageSize = [[UIScreen mainScreen] bounds].size;
    if (NULL != UIGraphicsBeginImageContextWithOptions)
        UIGraphicsBeginImageContextWithOptions(imageSize, NO, 0);
    else
        UIGraphicsBeginImageContext(imageSize);

    CGContextRef context = UIGraphicsGetCurrentContext();

    // Iterate over every window from back to front
    for (UIWindow *window in [[UIApplication sharedApplication] windows])
    {
        if (![window respondsToSelector:@selector(screen)] || [window screen] == [UIScreen mainScreen])
        {
            // -renderInContext: renders in the coordinate space of the layer,
            // so we must first apply the layer's geometry to the graphics context
            CGContextSaveGState(context);
            // Center the context around the window's anchor point
            CGContextTranslateCTM(context, [window center].x, [window center].y);
            // Apply the window's transform about the anchor point
            CGContextConcatCTM(context, [window transform]);
            // Offset by the portion of the bounds left of and above the anchor point
            CGContextTranslateCTM(context,
                                  -[window bounds].size.width * [[window layer] anchorPoint].x,
                                  -[window bounds].size.height * [[window layer] anchorPoint].y);


            for (UIView *subview in window.subviews)
            {
                // Heuristic: treat any layer that responds to -drawableProperties
                // as a CAEAGLLayer backing an OpenGL ES view
                CAEAGLLayer *eaglLayer = (CAEAGLLayer *) subview.layer;
                if([eaglLayer respondsToSelector:@selector(drawableProperties)]) {
                    NSLog(@"responds");
                    /*eaglLayer.drawableProperties = @{
                                                     kEAGLDrawablePropertyRetainedBacking: [NSNumber numberWithBool:YES],
                                                     kEAGLDrawablePropertyColorFormat: kEAGLColorFormatRGBA8
                                                     };*/
                    UIImageView *glImageView = [[UIImageView alloc] initWithImage:[self snapshotx:subview]];
                    glImageView.transform = CGAffineTransformMakeScale(1, -1);
                    [glImageView.layer renderInContext:context];

                    //CGImageRef iref = [self snapshot:subview withContext:context];
                    //CGContextDrawImage(context, CGRectMake(0.0, 0.0, 640, 960), iref);

                }
            }

            // Render the window's UIKit layer hierarchy once,
            // after any GL subviews have been handled
            [[window layer] renderInContext:context];

            // Restore the context
            CGContextRestoreGState(context);
        }
    }

    // Retrieve the screenshot image
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();

    UIGraphicsEndImageContext();

    return image;
}

- (UIImage*)snapshotx:(UIView*)eaglview
{
    GLint backingWidth, backingHeight;

    //glBindRenderbufferOES(GL_RENDERBUFFER_OES, _colorRenderbuffer);
    //don't know how to access the renderbuffer if I can't directly access the code below

    // Get the size of the backing CAEAGLLayer
    glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth);
    glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight);

    NSInteger x = 0, y = 0, width = backingWidth, height = backingHeight;
    NSInteger dataLength = width * height * 4;
    GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));

    // Read pixel data from the framebuffer
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);

    // Create a CGImage with the pixel data
    // If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
    // otherwise, use kCGImageAlphaPremultipliedLast
    CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, data, dataLength, NULL);
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
    CGImageRef iref = CGImageCreate (
                                     width,
                                     height,
                                     8,
                                     32,
                                     width * 4,
                                     colorspace,
                                     // Fix from Apple implementation
                                     // (was: kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast).
                                     kCGBitmapByteOrderDefault,
                                     ref,
                                     NULL,
                                     true,
                                     kCGRenderingIntentDefault
                                     );

    // OpenGL ES measures data in PIXELS
    // Create a graphics context with the target size measured in POINTS
    NSInteger widthInPoints, heightInPoints;
    if (NULL != UIGraphicsBeginImageContextWithOptions)
    {
        // On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
        // Set the scale parameter to your OpenGL ES view's contentScaleFactor
        // so that you get a high-resolution snapshot when its value is greater than 1.0
        CGFloat scale = eaglview.contentScaleFactor;
        widthInPoints = width / scale;
        heightInPoints = height / scale;
        UIGraphicsBeginImageContextWithOptions(CGSizeMake(widthInPoints, heightInPoints), NO, scale);
    }
    else {
        // On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
        widthInPoints = width;
        heightInPoints = height;
        UIGraphicsBeginImageContext(CGSizeMake(widthInPoints, heightInPoints));
    }

    CGContextRef cgcontext = UIGraphicsGetCurrentContext();

    // UIKit coordinate system is upside down to GL/Quartz coordinate system
    // Flip the CGImage by rendering it to the flipped bitmap context
    // The size of the destination area is measured in POINTS
    CGContextSetBlendMode(cgcontext, kCGBlendModeCopy);
    CGContextDrawImage(cgcontext, CGRectMake(0.0, 0.0, widthInPoints, heightInPoints), iref);

    // Retrieve the UIImage from the current context
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();

    UIGraphicsEndImageContext();

    // Clean up
    free(data);
    CFRelease(ref);
    CFRelease(colorspace);
    CGImageRelease(iref);

    return image;
}

Any suggestions on how to mix the two without modifying the classes in the rest of the application?

Thanks!


2 Answers


I see what you're trying to do there, and it's not a bad concept. There does seem to be one big problem, though: you can't call glReadPixels at just any time. First, you should make sure the buffer is actually full of the data (pixels) you need, and second, it should be called on the same thread the GL part is working on...
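To make that concrete, here is a minimal sketch of where in a render loop glReadPixels is actually safe to call, assuming an ES1-style loop you control; _context, _framebuffer, _colorRenderbuffer, drawScene, and the capture fields are all hypothetical names:

    - (void)render
    {
        // Must run on the thread that owns the GL context
        [EAGLContext setCurrentContext:_context];
        glBindFramebufferOES(GL_FRAMEBUFFER_OES, _framebuffer);

        [self drawScene]; // fill the buffer with the pixels you actually need...

        if (_captureRequested)
        {
            // ...then read them while they are still in the renderbuffer,
            // BEFORE presentRenderbuffer: hands them off to the screen
            glReadPixels(0, 0, _backingWidth, _backingHeight,
                         GL_RGBA, GL_UNSIGNED_BYTE, _captureBuffer);
            _captureRequested = NO;
        }

        glBindRenderbufferOES(GL_RENDERBUFFER_OES, _colorRenderbuffer);
        [_context presentRenderbuffer:GL_RENDERBUFFER_OES];
    }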

If the GL view is not yours, you could run into serious trouble calling that screenshot method: you would need to call something that triggers binding its internal context, and if it's animating you would have to know when the loop finishes, to make sure the pixels you receive are the same ones being presented on the view.

In any case, even if you get past all of that, you would probably still need to hop between threads or wait for a render loop to finish. In that situation I suggest using a block that returns the screenshot image, passed as a method parameter so you can catch it whenever it's ready. That said, it would be best if you could override a few methods on the GL view so it can return the screenshot image through a callback block, and build a small recursive system around that.
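A rough sketch of that block-based shape, assuming you can add these methods to the GL view (snapshotWithCompletion:, _pendingHandler and renderLoopDidFinishFrame are hypothetical; snapshotx: is the method from the question):

    typedef void (^ScreenshotHandler)(UIImage *screenshot);

    - (void)snapshotWithCompletion:(ScreenshotHandler)completion
    {
        // Don't capture immediately; remember the request and let the
        // render loop satisfy it once a full frame is available
        _pendingHandler = [completion copy];
    }

    // Called from the GL thread at the end of each frame
    - (void)renderLoopDidFinishFrame
    {
        if (_pendingHandler == nil) return;

        UIImage *image = [self snapshotx:self]; // safe here: the buffer is full
        ScreenshotHandler handler = _pendingHandler;
        _pendingHandler = nil;

        dispatch_async(dispatch_get_main_queue(), ^{
            handler(image); // hand the image back on the main thread
        });
    }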

To sum up: you need to anticipate multithreading, set the context, bind the correct framebuffer, and wait for everything to be rendered. All of that may make it impossible to create a single screenshot method that simply works for any application, view, or system without overriding some internal methods.

Note that you are simply not allowed to take a full screenshot of the device from inside your app (the kind you get by pressing the home and lock buttons together). The reason the UIView part is so easy to turn into an image is that UIViews are redrawn into a graphics context independent of the screen; it's as if you could take some GL pipeline, bind it to your own buffer and context, and draw into that, which would let you grab its screenshot independently and on any thread.
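For illustration, binding the pipeline to your own buffer would look roughly like this in ES1 (a sketch only; it assumes the OES framebuffer-object and rgb8_rgba8 extensions and a context you own):

    GLuint fbo, colorRenderbuffer;
    glGenFramebuffersOES(1, &fbo);
    glBindFramebufferOES(GL_FRAMEBUFFER_OES, fbo);

    glGenRenderbuffersOES(1, &colorRenderbuffer);
    glBindRenderbufferOES(GL_RENDERBUFFER_OES, colorRenderbuffer);
    glRenderbufferStorageOES(GL_RENDERBUFFER_OES, GL_RGBA8_OES, width, height);
    glFramebufferRenderbufferOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES,
                                 GL_RENDERBUFFER_OES, colorRenderbuffer);

    // Draw the scene into this offscreen buffer; glReadPixels now works
    // without depending on what is shown on screen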

Answered 2013-06-10T07:01:48.157

As it happens, I'm trying to do something similar; I'll post the full solution when I've worked it out, but in brief:

  • Use your superview layer's renderInContext: method
  • In the subviews that use OpenGL, implement the layer delegate's drawLayer:inContext: method
  • To render the view into the context, use a CVOpenGLESTextureCacheRef

Your superview's layer will call renderInContext: on each of its sublayers; by implementing the delegate method, your GLView responds on behalf of its layer.
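A rough sketch of that hook, assuming the GL view acts as its own layer's delegate (glSnapshotImage is a hypothetical helper that captures the current framebuffer, e.g. via the texture cache or glReadPixels):

    - (void)drawLayer:(CALayer *)layer inContext:(CGContextRef)ctx
    {
        UIImage *snapshot = [self glSnapshotImage];
        if (snapshot == nil) return;

        // Quartz and GL disagree about the vertical axis, so flip before drawing
        CGContextTranslateCTM(ctx, 0, layer.bounds.size.height);
        CGContextScaleCTM(ctx, 1, -1);
        CGContextDrawImage(ctx, layer.bounds, snapshot.CGImage);
    }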

Using the texture cache is much faster than glReadPixels: that is probably the bottleneck.
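For reference, setting up such a cache looks roughly like this (a sketch only, needing CoreVideo; it assumes your GL view renders into a texture attached to its framebuffer rather than a plain renderbuffer, and eaglContext/width/height are yours):

    #import <CoreVideo/CoreVideo.h>

    CVOpenGLESTextureCacheRef cache;
    CVOpenGLESTextureCacheCreate(kCFAllocatorDefault, NULL, eaglContext, NULL, &cache);

    // An IOSurface-backed pixel buffer that GL can render into directly
    NSDictionary *attrs = @{ (id)kCVPixelBufferIOSurfacePropertiesKey : @{} };
    CVPixelBufferRef pixelBuffer;
    CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32BGRA,
                        (__bridge CFDictionaryRef)attrs, &pixelBuffer);

    CVOpenGLESTextureRef texture;
    CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, cache, pixelBuffer,
                                                 NULL, GL_TEXTURE_2D, GL_RGBA,
                                                 width, height, GL_BGRA, GL_UNSIGNED_BYTE,
                                                 0, &texture);

    // Attach the texture as the framebuffer's color target; after drawing,
    // the rendered pixels are readable from pixelBuffer without glReadPixels
    glFramebufferTexture2DOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES,
                              GL_TEXTURE_2D, CVOpenGLESTextureGetName(texture), 0);
    // ... draw the scene, then:
    // CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
    // CVPixelBufferGetBaseAddress(pixelBuffer) -> feed into a CGBitmapContext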

Sam

Answered 2013-10-30T15:34:54.333