I've created an augmented reality application based on the Vuforia platform. I'm now modifying it so that, if the target is lost, the system uses the target's last known pose together with device orientation data from CoreMotion to keep the object in the right place.

There's one last piece I need help with: integrating the CoreMotion data. I think the best approach is to rotate the virtual camera based on the gyro input, but I'm no OpenGL ES expert. Can anyone shed some light on the best way to do this? I know how to get the device orientation data; it's the OpenGL and the matrix algebra I need some guidance with.
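For context, here's roughly how I'm pulling the orientation data. This is only a minimal sketch: motionManager and currentAttitude are ivars of mine, and the update interval and queue choice are placeholders rather than recommendations.

#import <CoreMotion/CoreMotion.h>

// Minimal CoreMotion setup sketch.
motionManager = [[CMMotionManager alloc] init];
motionManager.deviceMotionUpdateInterval = 1.0 / 60.0;  // roughly match the render loop

[motionManager startDeviceMotionUpdatesToQueue:[NSOperationQueue mainQueue]
                                   withHandler:^(CMDeviceMotion *motion, NSError *error) {
    // attitude describes the device's orientation relative to a reference
    // frame; it exposes roll/pitch/yaw and a 3x3 rotationMatrix. Stash it
    // here for use in renderFrameQCAR when the trackable is lost.
    currentAttitude = motion.attitude;
}];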

My renderFrame method is below.

-(void)renderFrameQCAR {
    [self setFramebuffer];

    // Clear colour and depth buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Render video background and retrieve tracking state
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    QCAR::Renderer::getInstance().drawVideoBackground();

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);


    // Check if any trackables are visible.
    int numberOfTrackables = state.getNumActiveTrackables();
    QCAR::Matrix44F modelViewMatrix;


    // Skip rendering if there is nothing to render.
    if (numberOfTrackables > 0 || hasPickedUpTrackablePreviously == YES) {

        // If there are none, but one was picked up in the past, use the last pose matrix.
        if (numberOfTrackables == 0 && hasPickedUpTrackablePreviously == YES) {
            modelViewMatrix = trackablePoseMatrix;
        }
        else {
            // Get the trackable
            const QCAR::Trackable* trackable = state.getActiveTrackable(0);
            modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(trackable->getPose());

            // Store these variables for use later.
            trackablePoseMatrix = modelViewMatrix;
            hasPickedUpTrackablePreviously = YES;
        }


        // Fetch the 3D object to render.
        Object3D *obj3D;

        if (currentlyChangingTextures == YES || useDummyModel == YES) {
            obj3D = dummyObject;
        }
        else {
            obj3D = [objects3D objectAtIndex:0];
        }


        // Render using the appropriate version of OpenGL
        // OpenGL 2
        QCAR::Matrix44F modelViewProjection;

        // Apply usual transformations here
        ShaderUtils::translatePoseMatrix(sideToSideFloat, forwardBackFloat, 0.0f, &modelViewMatrix.data[0]);
        ShaderUtils::scalePoseMatrix(kObjectScale * sizeFloat, kObjectScale * sizeFloat, kObjectScale * sizeFloat, &modelViewMatrix.data[0]);
        ShaderUtils::rotatePoseMatrix(0.0f + rotationAngleFloat, 0.0f, 0.0f, 1.0f, &modelViewMatrix.data[0]);


        // Apply our translation vector here based on gesture info from the buttonOverlayViewController
        QCAR::Vec3F translationFromWorldPerspective = SampleMath::Vec3FTransformNormal(translationVectorFromCamerasPerspective, inverseModelViewMatrix);

        translationFromWorldPerspective = SampleMath::Vec3FNormalize(translationFromWorldPerspective);

        theTranslation.data[0] = theTranslation.data[0] + speed*translationFromWorldPerspective.data[0];
        theTranslation.data[1] = theTranslation.data[1] + speed*translationFromWorldPerspective.data[1];
        theTranslation.data[2] = 0.0f;

        ShaderUtils::translatePoseMatrix(theTranslation.data[0], theTranslation.data[1], theTranslation.data[2], &modelViewMatrix.data[0]);

        // Update inverseModelViewMatrix
        inverseModelViewMatrix = SampleMath::Matrix44FInverse(modelViewMatrix);

        // Multiply modelview and projection matrix as usual
        ShaderUtils::multiplyMatrix(&qUtils.projectionMatrix.data[0], &modelViewMatrix.data[0], &modelViewProjection.data[0]);

        glUseProgram(shaderProgramID);

        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.vertices);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.normals);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.texCoords);

        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(textureCoordHandle);

        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, [obj3D.texture textureID]);
        glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE, (const GLfloat*)&modelViewProjection.data[0]);
        glDrawArrays(GL_TRIANGLES, 0, obj3D.numVertices);

        ShaderUtils::checkGlError("EAGLView renderFrameQCAR");
    }


    // Disable these things.
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_CULL_FACE);

    glDisableVertexAttribArray(vertexHandle);
    glDisableVertexAttribArray(normalHandle);
    glDisableVertexAttribArray(textureCoordHandle);

    QCAR::Renderer::getInstance().end();
    [self presentFramebuffer];
}

Thanks!!


1 Answer


I haven't used Vuforia, so I don't fully understand your code, but I have successfully built an AR app that uses the gyroscope and compass to control the camera. Here is my camera matrix code:

GLKMatrix4 cameraMatrix = GLKMatrix4Identity;
cameraMatrix = GLKMatrix4Rotate(cameraMatrix, GLKMathDegreesToRadians((zenith-90)), 1.0f, 0.0f, 0.0f);
cameraMatrix = GLKMatrix4Rotate(cameraMatrix, GLKMathDegreesToRadians(azimuth), 0.0f, 1.0f, 0.0f);

where: zenith = (180 - gyroscope roll angle) [0 points straight up, 180 points straight down], and azimuth = compass heading (0 N, 90 E, 180 S, 270 W).
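If it helps, here's a sketch of where those two angles could come from on iOS. Treat it as an assumption about your setup: motionManager and locationManager are presumed to exist and be delivering updates, and the exact roll mapping depends on how the device is held.

// Hypothetical sketch -- assumes a running CMMotionManager and a
// CLLocationManager that already has a heading fix.
CMDeviceMotion *motion = motionManager.deviceMotion;
float zenith  = 180.0f - GLKMathRadiansToDegrees(motion.attitude.roll);
float azimuth = locationManager.heading.trueHeading;  // 0 N, 90 E, 180 S, 270 W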

In the vertex shader, gl_Position is computed as:

gl_Position = uProjectionMatrix * uCameraMatrix * vec4(position, 1.0);

I'm using point sprites, but position holds my particle's coordinates in 3D world space. In your case, I'd guess this is where you would substitute your other matrices (modelViewMatrix and so on), as in the shader line below.
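For example, folding a modelview matrix into that shader line might look like this (uModelViewMatrix is a hypothetical uniform; your code computes the product on the CPU instead):

gl_Position = uProjectionMatrix * uCameraMatrix * uModelViewMatrix * vec4(position, 1.0);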

Watch the order of your matrix multiplications!
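Mapping that onto your renderFrameQCAR is a sketch on my part, since I haven't used Vuforia: assuming ShaderUtils::multiplyMatrix computes C = A * B, as your existing projection * modelview call suggests, and that cameraRotationMatrix is your gyro-derived rotation in QCAR::Matrix44F form, the camera rotation would slot in between the two. Reversing the order would rotate the object in model space instead of rotating the camera.

// Hedged sketch: apply the gyro-derived camera rotation between the
// projection and modelview matrices. cameraRotationMatrix is hypothetical.
QCAR::Matrix44F projectionTimesCamera;
ShaderUtils::multiplyMatrix(&qUtils.projectionMatrix.data[0],
                            &cameraRotationMatrix.data[0],
                            &projectionTimesCamera.data[0]);
ShaderUtils::multiplyMatrix(&projectionTimesCamera.data[0],
                            &modelViewMatrix.data[0],
                            &modelViewProjection.data[0]);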

Answered on 2012-12-23 at 00:59