
So I am adding zoom and pan to my app, targeting iOS 4.2. I have implemented one instance each of UIPinchGestureRecognizer and UIPanGestureRecognizer. As far as I can tell, only one of them recognizes a gesture at a time. In particular, the latter reacts only while a single finger is down, and the former takes over once a second finger is present. That is acceptable, but it has side effects that I think degrade the quality of the user experience.

When you put two fingers down and then move one of them, the image grows (zooms in) as it should, but the pixels that were under your fingers no longer stay under them. The image scales about its own center rather than about the midpoint between the two fingers. And that midpoint is itself moving; I would like its movement to drive the panning of the whole image.

Do nearly all iOS applications behave this way, with the image zooming in and out about its center, rather than the pixels under the fingers tracking the fingers?

It seems to me that writing a custom gesture recognizer is the right design approach to this problem, but it also seems likely that someone has already built such a recognizer and made it freely available for commercial download and use. Is there such a UIGestureRecognizer?


3 Answers


Sorry, I'm in a hurry, but here is the code I used for one of my demo apps. It zooms and pans at the same time without using a scroll view.

Don't forget to conform to the UIGestureRecognizerDelegate protocol.
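For instance, the declaration in ViewController.h would adopt it (assuming the ViewController class used in the listing below):

@interface ViewController : UIViewController <UIGestureRecognizerDelegate>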

If you cannot pinch and pan at the same time, it is probably because you are missing this method:

-(BOOL)gestureRecognizer:(UIGestureRecognizer *)gestureRecognizer shouldRecognizeSimultaneouslyWithGestureRecognizer:(UIGestureRecognizer *)otherGestureRecognizer
{
    return YES;
}

Here is the full source code:

#import "ViewController.h"
#import <QuartzCore/QuartzCore.h>

@interface ViewController ()

@end

@implementation ViewController

- (void)viewDidLoad
{
    [super viewDidLoad];
    // Do any additional setup after loading the view, typically from a nib.

    isEditing = false;

    photoView = [[UIImageView alloc] initWithFrame:CGRectMake(0, 0, 320, 460)];
    [photoView setImage:[UIImage imageNamed:@"photo.png"]];
    photoView.hidden = YES;

    maskView = [[UIImageView alloc] initWithFrame:CGRectMake(0, 0, 320, 460)];
    [maskView setImage:[UIImage imageNamed:@"maskguide.png"]];
    maskView.hidden = YES;

    displayImage = [[UIImageView alloc] initWithFrame:CGRectMake(0, 0, 320, 460)];

    UIPanGestureRecognizer *panGesture = [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(handlePan:)];
    UIPinchGestureRecognizer *pinchGesture = [[UIPinchGestureRecognizer alloc] initWithTarget:self action:@selector(handlePinch:)];

    [panGesture setDelegate:self];
    [pinchGesture setDelegate:self];

    [photoView addGestureRecognizer:panGesture];
    [photoView addGestureRecognizer:pinchGesture];
    [photoView setUserInteractionEnabled:YES];

    [panGesture release];
    [pinchGesture release];

    btnEdit = [[UIButton alloc] initWithFrame:CGRectMake(60, 400, 200, 50)];
    [btnEdit setBackgroundColor:[UIColor blackColor]];
    [btnEdit setTitle:@"Start Editing" forState:UIControlStateNormal];
    [btnEdit addTarget:self action:@selector(toggleEditing) forControlEvents:UIControlEventTouchUpInside];

    [[self view] addSubview:displayImage];
    [[self view] addSubview:photoView];
    [[self view] addSubview:maskView];
    [[self view] addSubview:btnEdit];

    [self updateMaskedImage];
}

- (void)viewDidUnload
{
    [super viewDidUnload];
    // Release any retained subviews of the main view.
}

- (BOOL)shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation
{
    return (interfaceOrientation != UIInterfaceOrientationPortraitUpsideDown);
}

-(void)dealloc
{
    // Release the retained subviews created in viewDidLoad.
    [photoView release];
    [maskView release];
    [displayImage release];
    [btnEdit release];

    [super dealloc];
}

#pragma mark -
#pragma mark Update Masked Image Method
#pragma mark -

-(void)updateMaskedImage
{
    maskView.hidden = YES;

    UIImage *finalImage = 
    [self maskImage:[self captureView:self.view]
           withMask:[UIImage imageNamed:@"mask.png"]];


    maskView.hidden = NO;

    //UIImage *finalImage = [self maskImage:photoView.image withMask:[UIImage imageNamed:@"mask.png"]];

    [displayImage setImage:finalImage];
}

- (UIImage*) maskImage:(UIImage *)image withMask:(UIImage *)maskImage {

    CGImageRef maskRef = maskImage.CGImage; 

    CGImageRef mask = CGImageMaskCreate(CGImageGetWidth(maskRef),
                                        CGImageGetHeight(maskRef),
                                        CGImageGetBitsPerComponent(maskRef),
                                        CGImageGetBitsPerPixel(maskRef),
                                        CGImageGetBytesPerRow(maskRef),
                                        CGImageGetDataProvider(maskRef), NULL, false);

    CGImageRef masked = CGImageCreateWithMask([image CGImage], mask);
    UIImage *result = [UIImage imageWithCGImage:masked];

    // Release the CGImageRefs created above; they are not autoreleased.
    CGImageRelease(mask);
    CGImageRelease(masked);

    return result;

}

#pragma mark -
#pragma mark Touches Began
#pragma mark -

// toggles the editing flag so dragging and dropping works
-(void)toggleEditing
{
    if(!isEditing)
    {
        isEditing = true;

        NSLog(@"editing...");

        [btnEdit setTitle:@"Stop Editing" forState:UIControlStateNormal];

        displayImage.hidden = YES;
        photoView.hidden = NO;
        maskView.hidden = NO;
    }
    else
    {
        isEditing = false;

        [self updateMaskedImage];

        NSLog(@"stopped editting");

        [btnEdit setTitle:@"Start Editing" forState:UIControlStateNormal];

        displayImage.hidden = NO;
        photoView.hidden = YES;
        maskView.hidden = YES;
    }
}

/*
-(void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event
{   
    if(isEditing)
    {
        UITouch *finger = [touches anyObject];
        CGPoint currentPosition = [finger locationInView:self.view];

        //[maskView setCenter:currentPosition];
        //[photoView setCenter:currentPosition];
        if([touches count] == 1)
        {
            [photoView setCenter:currentPosition];
        }
        else if([touches count] == 2)
        {

        }
    }
}
*/

-(void)handlePan:(UIPanGestureRecognizer *)recognizer
{    
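    // Shift the view's center by the gesture's accumulated translation, then
    // reset the translation so the next callback reports only the new delta.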
    CGPoint translation = [recognizer translationInView:self.view];
    recognizer.view.center = CGPointMake(recognizer.view.center.x + translation.x, 
                                         recognizer.view.center.y + translation.y);
    [recognizer setTranslation:CGPointMake(0, 0) inView:self.view];
}

-(void)handlePinch:(UIPinchGestureRecognizer *)recognizer
{    
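    // Apply the incremental pinch factor on top of the current transform, then
    // reset scale to 1 so the next callback is incremental again. By default
    // this scales about the view's center (its layer anchorPoint).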
    recognizer.view.transform = CGAffineTransformScale(recognizer.view.transform, recognizer.scale, recognizer.scale);
    recognizer.scale = 1;
}

-(BOOL)gestureRecognizer:(UIGestureRecognizer *)gestureRecognizer shouldRecognizeSimultaneouslyWithGestureRecognizer:(UIGestureRecognizer *)otherGestureRecognizer
{
    return YES;
}

#pragma mark -
#pragma mark Capture Screen Function
#pragma mark -

- (UIImage*)captureView:(UIView *)yourView 
{
    UIGraphicsBeginImageContextWithOptions(yourView.bounds.size, yourView.opaque, 0.0);
    CGContextRef context = UIGraphicsGetCurrentContext();
    [yourView.layer renderInContext:context];
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return image;
}

#pragma mark -

@end
Answered 2012-08-08T02:46:23.953

So I created the custom gesture recognizer, since nobody offered me a better route to the desired result. Below are the key code fragments that let the custom recognizer tell the view where it should be repositioned and what its new scale should be, using the centroid of the two fingers as the center of both the pan and the zoom, so that the pixels under the fingers stay under the fingers the entire time. The exception is when the fingers rotate, which is not supported, and I cannot stop users from making that gesture. This recognizer pans and zooms simultaneously with two fingers; I still need to add support for continuing to pan with one finger after one of the two fingers is lifted.

- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event
{
    // We can only process if we have two fingers down...
    if ( FirstFinger == nil || SecondFinger == nil )
        return;

    // We do not attempt to determine if the first finger, second finger, or
    // both fingers are the reason for this method call. For this reason, we
    // do not know if either is stale or updated, and thus we cannot rely
    // upon the UITouch's previousLocationInView method. Therefore, we need to
    // cache the latest UITouch's locationInView information each pass.

    // Break down the previous finger coordinates...
    float A0x = PreviousFirstFinger.x;
    float A0y = PreviousFirstFinger.y;
    float A1x = PreviousSecondFinger.x;
    float A1y = PreviousSecondFinger.y;
    // Update our cache with the current fingers for next pass through here...
    PreviousFirstFinger = [FirstFinger locationInView:nil];
    PreviousSecondFinger = [SecondFinger locationInView:nil];
    // Break down the current finger coordinates...
    float B0x = PreviousFirstFinger.x;
    float B0y = PreviousFirstFinger.y;
    float B1x = PreviousSecondFinger.x;
    float B1y = PreviousSecondFinger.y;


    // Calculate the zoom resulting from the two fingers moving toward or away from each other...
    float OldScale = Scale;
    Scale *= sqrt((B0x-B1x)*(B0x-B1x) + (B0y-B1y)*(B0y-B1y))/sqrt((A0x-A1x)*(A0x-A1x) + (A0y-A1y)*(A0y-A1y));

    // Calculate the old and new centroids so that we can compare the centroid's movement...
    CGPoint OldCentroid = { (A0x + A1x)/2, (A0y + A1y)/2 };
    CGPoint NewCentroid = { (B0x + B1x)/2, (B0y + B1y)/2 };    

    // Calculate the pan values to apply to the view so that the combination of zoom and pan
    // appear to apply to the centroid rather than the center of the view...
    Center.x = NewCentroid.x + (Scale/OldScale)*(self.view.center.x - OldCentroid.x);
    Center.y = NewCentroid.y + (Scale/OldScale)*(self.view.center.y - OldCentroid.y);
}
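Only touchesMoved: was posted above. For completeness, here is a minimal sketch of the finger bookkeeping the rest of the recognizer presumably performs; FirstFinger/SecondFinger, PreviousFirstFinger/PreviousSecondFinger, Scale, and Center are the ivars used above, but these two methods are an assumption rather than the original code. Setting self.state also requires importing UIGestureRecognizerSubclass.h, and touchesMoved: would need to set UIGestureRecognizerStateChanged after updating Scale and Center so the target action fires.

// Hypothetical companion methods (not from the original answer).
// Requires: #import <UIKit/UIGestureRecognizerSubclass.h>
- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
    for (UITouch *touch in touches)
    {
        if (FirstFinger == nil)
        {
            FirstFinger = touch;
            PreviousFirstFinger = [touch locationInView:nil];
        }
        else if (SecondFinger == nil)
        {
            SecondFinger = touch;
            PreviousSecondFinger = [touch locationInView:nil];
            // Both fingers are now down, so the two-finger gesture can begin.
            self.state = UIGestureRecognizerStateBegan;
        }
    }
}

- (void)touchesEnded:(NSSet *)touches withEvent:(UIEvent *)event
{
    if (FirstFinger != nil && [touches containsObject:FirstFinger])
        FirstFinger = nil;
    if (SecondFinger != nil && [touches containsObject:SecondFinger])
        SecondFinger = nil;

    if (FirstFinger == nil && SecondFinger == nil)
        self.state = UIGestureRecognizerStateEnded;
}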

The view controller handles the event by assigning the new scale and center to the view in question. I noticed that other gesture recognizers tend to let the controller do some of the math, but I tried to do all of the math inside the recognizer.

-(void)handlePixelTrack:(PixelTrackGestureRecognizer*)sender
{
    sender.view.center= sender.Center;
    sender.view.transform = CGAffineTransformMakeScale(sender.Scale, sender.Scale);
}
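Wiring the recognizer up presumably looks like any built-in recognizer; a sketch, where zoomableView is a placeholder name for whatever view is being panned and zoomed:

// zoomableView is a placeholder for the view being manipulated.
PixelTrackGestureRecognizer *pixelTrack =
    [[PixelTrackGestureRecognizer alloc] initWithTarget:self
                                                 action:@selector(handlePixelTrack:)];
[zoomableView addGestureRecognizer:pixelTrack];
[pixelTrack release];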
Answered 2012-08-10T04:41:03.137

The simpler solution is to put your view inside a scroll view; then you get pinch and pan for free. Otherwise, you can set yourself as the delegate of both the pan and pinch gestures and return YES from shouldRecognizeSimultaneouslyWithGestureRecognizer:. As for zooming in about the point under the user's fingers, I never solved that properly, but it involves manipulating the view's layer anchorPoint before changing its scale (I think).
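A minimal sketch of that anchorPoint idea, as an interpretation of the hint above rather than code from this answer: when the pinch begins, move the layer's anchorPoint to the point between the fingers and move the layer's position to that same point in superview coordinates so the view does not visibly jump; subsequent scaling then happens about the fingers instead of the view's middle (accessing layer properties requires QuartzCore).

-(void)handlePinch:(UIPinchGestureRecognizer *)recognizer
{
    UIView *view = recognizer.view;

    if (recognizer.state == UIGestureRecognizerStateBegan)
    {
        // Move the anchorPoint to the spot between the fingers, expressed in
        // the layer's unit coordinate space, and move position to the same
        // spot in superview coordinates so the view stays where it was.
        CGPoint pinchInView = [recognizer locationInView:view];
        CGPoint pinchInSuperview = [recognizer locationInView:view.superview];

        view.layer.anchorPoint = CGPointMake(pinchInView.x / view.bounds.size.width,
                                             pinchInView.y / view.bounds.size.height);
        view.layer.position = pinchInSuperview;
    }

    // Scaling now happens about the fingers rather than the view's center.
    view.transform = CGAffineTransformScale(view.transform, recognizer.scale, recognizer.scale);
    recognizer.scale = 1;
}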

Answered 2012-08-07T23:51:25.787