
I'm developing some code using SceneKit on iOS, and in my code I want to determine the x and y coordinates on the global z plane, where z is 0.0 and x and y are determined by a tap gesture. My setup is as follows:

override func viewDidLoad() {
    super.viewDidLoad()

    // create a new scene
    let scene = SCNScene()

    // create and add a camera to the scene
    let cameraNode = SCNNode()
    let camera = SCNCamera()
    cameraNode.camera = camera
    scene.rootNode.addChildNode(cameraNode)

    // place the camera
    cameraNode.position = SCNVector3(x: 0, y: 0, z: 15)

    // create and add an ambient light to the scene
    let ambientLightNode = SCNNode()
    ambientLightNode.light = SCNLight()
    ambientLightNode.light.type = SCNLightTypeAmbient
    ambientLightNode.light.color = UIColor.darkGrayColor()
    scene.rootNode.addChildNode(ambientLightNode)

    // add the triangle geometry
    let triangleNode = SCNNode()
    triangleNode.geometry = defineTriangle()
    scene.rootNode.addChildNode(triangleNode)

    // retrieve the SCNView
    let scnView = self.view as SCNView

    // set the scene to the view
    scnView.scene = scene

    // configure the view
    scnView.backgroundColor = UIColor.blackColor()

    // add a tap gesture recognizer
    let tapGesture = UITapGestureRecognizer(target: self, action: "handleTap:")
    scnView.gestureRecognizers = [tapGesture]
}

func handleTap(gestureRecognize: UIGestureRecognizer) {
    // retrieve the SCNView
    let scnView = self.view as SCNView
    // get the tap location in view coordinates
    let p = gestureRecognize.locationInView(scnView)
    // get the camera
    var camera = scnView.pointOfView.camera

    // screenZ is percentage between z near and far
    var screenZ = Float((15.0 - camera.zNear) / (camera.zFar - camera.zNear))
    var scenePoint = scnView.unprojectPoint(SCNVector3Make(Float(p.x), Float(p.y), screenZ))
    println("tapPoint: (\(p.x), \(p.y)) scenePoint: (\(scenePoint.x), \(scenePoint.y), \(scenePoint.z))")
}

func defineTriangle() -> SCNGeometry {

    // Vertices
    var vertices:[SCNVector3] = [
        SCNVector3Make(-2.0, -2.0, 0.0),
        SCNVector3Make(2.0, -2.0, 0.0),
        SCNVector3Make(0.0, 2.0, 0.0)
    ]

    let vertexData = NSData(bytes: vertices, length: vertices.count * sizeof(SCNVector3))
    var vertexSource = SCNGeometrySource(data: vertexData,
        semantic: SCNGeometrySourceSemanticVertex,
        vectorCount: vertices.count,
        floatComponents: true,
        componentsPerVector: 3,
        bytesPerComponent: sizeof(Float),
        dataOffset: 0,
        dataStride: sizeof(SCNVector3))

    // Normals
    var normals:[SCNVector3] = [
        SCNVector3Make(0.0, 0.0, 1.0),
        SCNVector3Make(0.0, 0.0, 1.0),
        SCNVector3Make(0.0, 0.0, 1.0)
    ]

    let normalData = NSData(bytes: normals, length: normals.count * sizeof(SCNVector3))
    var normalSource = SCNGeometrySource(data: normalData,
        semantic: SCNGeometrySourceSemanticNormal,
        vectorCount: normals.count,
        floatComponents: true,
        componentsPerVector: 3,
        bytesPerComponent: sizeof(Float),
        dataOffset: 0,
        dataStride: sizeof(SCNVector3))

    // Indexes
    var indices:[CInt] = [0, 1, 2]
    var indexData = NSData(bytes: indices, length: sizeof(CInt) * indices.count)
    var indexElement = SCNGeometryElement(
        data: indexData,
        primitiveType: .Triangles,
        primitiveCount: 1,
        bytesPerIndex: sizeof(CInt)
    )

    var geo = SCNGeometry(sources: [vertexSource, normalSource], elements: [indexElement])

    // material
    var material = SCNMaterial()
    material.diffuse.contents = UIColor.redColor()
    material.doubleSided = true
    material.shininess = 1.0
    geo.materials = [material]

    return geo
}

As you can see, I have a triangle that is 4 units tall and 4 units wide, set on the z plane (z = 0) and centered at x, y (0.0, 0.0). The camera is the default SCNCamera, which looks in the negative z direction, and I've placed it at (0, 0, 15). zNear and zFar have their default values of 1.0 and 100.0 respectively. In my handleTap method, I take the x and y screen coordinates of the tap and try to find the x and y global scene coordinates where z = 0.0. I'm calling unprojectPoint.

The documentation for unprojectPoint indicates:

Unprojecting a point whose z-coordinate is 0.0 returns a point on the near clipping plane; unprojecting a point whose z-coordinate is 1.0 returns a point on the far clipping plane.

While it doesn't specifically state that there is a linear relationship for points between the near and far planes, I've made that assumption and computed the value of screenZ as the percentage distance between the near and far planes at which the z = 0 plane sits. To check my answer, I can click near the corners of the triangle, since I know where they are in global coordinates.

My problem is that I'm not getting the correct values, and I'm not getting consistent values once I start changing the zNear and zFar clipping planes on the camera. So my question is, how do I do this? In the end, I want to create a new piece of geometry and place it on the z plane at the spot corresponding to where the user clicked.

Thanks in advance for your help.


3 Answers


The typical depth buffer in a 3D graphics pipeline is not linear. Perspective division causes depth in normalized device coordinates to be on a different scale. (See also here.)

So the z coordinate you're feeding into unprojectPoint isn't actually the one you want.
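To see how far off the linear assumption is: under a standard GL-style perspective projection (an assumption; SceneKit doesn't document the exact formula), the normalized depth of a point at distance d in front of the camera works out as below. The function is a sketch for illustration only:

func normalizedDepth(d: Float, near: Float, far: Float) -> Float {
    // Standard perspective projection followed by the perspective divide,
    // remapped from [-1, 1] to [0, 1]. Note the 1/d term: depth resolution
    // is concentrated near the camera, not spread evenly.
    return (far / (far - near)) * (1 - near / d)
}

// With the question's setup (camera at z = 15, zNear = 1, zFar = 100),
// the z = 0 plane sits at d = 15:
//   normalizedDepth(15, near: 1, far: 100) ≈ 0.943
// not the (15 - 1) / (100 - 1) ≈ 0.141 that the linear assumption predicts.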

So, how do you find the normalized depth coordinate that matches a plane in world space? Well, it helps if that plane is orthogonal to the camera's view direction, and yours is. Then all you need to do is project a point on that plane:

let projectedOrigin = scnView.projectPoint(SCNVector3Zero)

Now you have the position of the world origin in 3D view + normalized depth space. To map other points in 2D view space onto this plane, use the z coordinate of that vector:

let vp = gestureRecognizer.locationInView(scnView)
let vpWithZ = SCNVector3(x: Float(vp.x), y: Float(vp.y), z: projectedOrigin.z)
let worldPoint = scnView.unprojectPoint(vpWithZ)

This gives you a point in world space that maps the click/tap location onto the z = 0 plane, suitable for use as the position of a node if you want to show that location to the user.
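Putting this together with the question's setup, handleTap might look something like the following (a sketch in the question's own style; the two projection calls are the substance of the fix):

func handleTap(gestureRecognize: UIGestureRecognizer) {
    let scnView = self.view as SCNView

    // find the normalized depth of the z = 0 plane by projecting the world origin
    let projectedOrigin = scnView.projectPoint(SCNVector3Zero)

    // unproject the tap location at that depth
    let p = gestureRecognize.locationInView(scnView)
    let vpWithZ = SCNVector3(x: Float(p.x), y: Float(p.y), z: projectedOrigin.z)
    let scenePoint = scnView.unprojectPoint(vpWithZ)
    println("tapPoint: (\(p.x), \(p.y)) scenePoint: (\(scenePoint.x), \(scenePoint.y), \(scenePoint.z))")
}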

(Note that this approach works only when mapping onto a plane perpendicular to the camera's view direction. If you want to map view coordinates onto a differently oriented surface, the normalized depth value in vpWithZ won't be constant.)
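If you do need a differently oriented surface, one option (my own sketch, not part of this answer; the helper name and dot-product math are assumptions) is to unproject the tap at the near and far planes and intersect the resulting ray with the plane directly:

func tapPointOnPlane(tap: CGPoint, view: SCNView,
                     planePoint: SCNVector3, planeNormal: SCNVector3) -> SCNVector3? {
    // build the pick ray in world space
    let near = view.unprojectPoint(SCNVector3(x: Float(tap.x), y: Float(tap.y), z: 0))
    let far = view.unprojectPoint(SCNVector3(x: Float(tap.x), y: Float(tap.y), z: 1))
    let dir = SCNVector3(x: far.x - near.x, y: far.y - near.y, z: far.z - near.z)

    func dot(a: SCNVector3, b: SCNVector3) -> Float {
        return a.x * b.x + a.y * b.y + a.z * b.z
    }

    // ray-plane intersection: solve dot(near + t * dir - planePoint, planeNormal) == 0
    let denom = dot(dir, planeNormal)
    if abs(denom) < 1e-6 { return nil } // ray is parallel to the plane

    let diff = SCNVector3(x: planePoint.x - near.x, y: planePoint.y - near.y, z: planePoint.z - near.z)
    let t = dot(diff, planeNormal) / denom
    return SCNVector3(x: near.x + dir.x * t, y: near.y + dir.y * t, z: near.z + dir.z * t)
}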

Answered 2014-08-06T06:54:42.090

After some experimentation, here is what we developed for projecting a touch point into the scene at an arbitrary depth.

The modification you need is to compute the intersection of the z = 0 plane with this ray; that intersection is your point (see the sketch after the code below).

private func touchPointToScenePoint(recognizer: UIGestureRecognizer) -> SCNVector3 {
    // Get touch point
    let touchPoint = recognizer.locationInView(sceneView)

    // Unproject the touch point at the near (z = 0) and far (z = 1) clipping planes
    let nearVector = SCNVector3(x: Float(touchPoint.x), y: Float(touchPoint.y), z: 0)
    let nearScenePoint = sceneView.unprojectPoint(nearVector)
    let farVector = SCNVector3(x: Float(touchPoint.x), y: Float(touchPoint.y), z: 1)
    let farScenePoint = sceneView.unprojectPoint(farVector)

    // Compute the view vector: the direction of the pick ray in world space
    let viewVector = SCNVector3(x: farScenePoint.x - nearScenePoint.x,
                                y: farScenePoint.y - nearScenePoint.y,
                                z: farScenePoint.z - nearScenePoint.z)

    // Normalize the view vector
    let vectorLength = sqrt(viewVector.x*viewVector.x + viewVector.y*viewVector.y + viewVector.z*viewVector.z)
    let normalizedViewVector = SCNVector3(x: viewVector.x/vectorLength,
                                          y: viewVector.y/vectorLength,
                                          z: viewVector.z/vectorLength)

    // Walk the chosen distance along the ray, starting from the near point
    let scale = Float(15)
    let scenePoint = SCNVector3(x: nearScenePoint.x + normalizedViewVector.x*scale,
                                y: nearScenePoint.y + normalizedViewVector.y*scale,
                                z: nearScenePoint.z + normalizedViewVector.z*scale)

    print("2D point: \(touchPoint). Near point: \(nearScenePoint). Far point: \(farScenePoint). Scene point: \(scenePoint)")

    return scenePoint
}
Answered 2016-09-11T04:19:22.710

This is my solution for getting an accurate point in 3D space.

// If the object is at the world origin, you can get the normalized
// depth of its plane like this:
float zDepth = [self projectPoint:SCNVector3Zero].z;

// or use the node's own position:
//zDepth = [self projectPoint:myNode.position].z;

NSLog(@"2D point: X %f, Y: %f, zDepth: %f", click.x, click.y, zDepth);

SCNVector3 worldPoint = [self unprojectPoint:SCNVector3Make(click.x, click.y, zDepth)];

// Unproject the click at the near (z = 0) and far (z = 1) clipping planes
// to get the two ends of the pick ray in world space.
SCNVector3 nearVec = SCNVector3Make(click.x, click.y, 0.0);
SCNVector3 nearPoint = [self unprojectPoint:nearVec];

SCNVector3 farVec = SCNVector3Make(click.x, click.y, 1.0);
SCNVector3 farPoint = [self unprojectPoint:farVec];

// Linearly interpolate along the ray to the point where world z == 0.
// (This assumes nearPoint.z and farPoint.z have opposite signs, i.e. the
// camera is in front of the z = 0 plane and looking through it.)
float z_magnitude = fabs(farPoint.z - nearPoint.z);
float near_pt_factor = fabs(nearPoint.z) / z_magnitude;
float far_pt_factor = fabs(farPoint.z) / z_magnitude;

GLKVector3 nearP = GLKVector3Make(nearPoint.x, nearPoint.y, nearPoint.z);
GLKVector3 farP = GLKVector3Make(farPoint.x, farPoint.y, farPoint.z);

GLKVector3 final_pt = GLKVector3Add(GLKVector3MultiplyScalar(nearP, far_pt_factor),
                                    GLKVector3MultiplyScalar(farP, near_pt_factor));

NSLog(@"3D world point = %f, %f, %f", final_pt.x, final_pt.y, final_pt.z);
Answered 2018-06-25T00:53:39.307