I have screen-space coordinates from a Kinect. I need to turn them into world-space coordinates for my OpenGL camera, or else place my camera at the same position as the Kinect so that I can see the points the Kinect reports. I think I need the points in my OpenGL camera's world space so that I can do collision detection against the other objects I am drawing. How do I obtain this ScreenspaceToWorld mapping?

I used this answer as a reference, but I don't have the Kinect's position and orientation with which to compute the view and projection matrices.
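
For context, this is roughly how I understand the unprojection from that answer (a sketch only; kinectView and kinectProj are placeholders for the Kinect view/projection matrices I don't know how to build, which is exactly my problem):

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp> // glm::unProject

// Sketch: map a screen-space point (x, y in pixels, z = depth in [0, 1])
// back to world space. kinectView / kinectProj are placeholders.
glm::vec3 ScreenspaceToWorld(const glm::vec3& screen,
                             const glm::mat4& kinectView,
                             const glm::mat4& kinectProj,
                             const glm::vec4& viewport) // (x, y, width, height)
{
    return glm::unProject(screen, kinectView, kinectProj, viewport);
}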

EDIT

[Image: Kinect field of view]

I need to be able to render objects in the same coordinate space as the Kinect. For this I use a projection matrix built from the following values (how I turn them into matrices is sketched after the list):

Near plane: 0.01f
Far plane: 1700.0f
Horizontal field of view: 60.0f
Vertical field of view: 45.0f
Camera position: (0.0f, 2.0f, 3.0f)
Camera direction: (0.0f, 2.0f, -3.0f)
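
For reference, this is how those values become matrices (a sketch mirroring what my Camera class does; treating the "camera direction" value as a look-at target is my own interpretation and may be wrong):

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

glm::mat4 view = glm::lookAt(
    glm::vec3(0.0f, 2.0f,  3.0f),   // camera position
    glm::vec3(0.0f, 2.0f, -3.0f),   // look-at target ("camera direction")
    glm::vec3(0.0f, 1.0f,  0.0f));  // up vector

// Note: I pass 60/45 as the aspect ratio; computed from the two FOVs it
// would strictly be tan(30 deg) / tan(22.5 deg) ~= 1.39 rather than 1.33.
glm::mat4 proj = glm::perspective(
    glm::radians(45.0f),            // vertical field of view
    60.0f / 45.0f,                  // aspect ratio
    0.01f, 1700.0f);                // near / far planes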

With these values I cannot see any rendered objects. Here is my rendering code:

GameRenderer::GameRenderer()
{
    tracker = new GestureTracker();
    tracker->init();
    tracker->startGestureDetection();
}

bool GameRenderer::Init(int argc, char* argv[])
{        
    GLfloat points[] = { 0.0f, 0.5f, 0.0f, 0.5f, -0.5f, 0.0f, -0.5f, -0.5f, 0.0f };
    GLfloat point[] = { 0.0f, 0.0f, 0.0f }; // currently unused
    glGenBuffers(1, &m_vbo);
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);

    glBufferData(GL_ARRAY_BUFFER, sizeof(points), points, GL_DYNAMIC_DRAW);
    // vao
    glGenVertexArrays(1, &m_vao);
    glBindVertexArray(m_vao);
    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);


    const char *vertex_shader = "#version 410\n"
                                "in vec3 vp;"
                                "uniform mat4 model_matrix;"
                                "uniform mat4 view_matrix;"
                                "uniform mat4 projection_matrix;"
                                "void main () {"
                                "  gl_Position = projection_matrix * view_matrix * model_matrix * vec4(vp, 1.0);"
                                "}";

    const char *fragment_shader = "#version 410\n"
                                  "out vec4 frag_colour;"
                                  "void main () {"
                                  "  frag_colour = vec4(0.0f, 0.5f, 1.0f, 1.0f);"
                                  "}";

    m_vert_shader = glCreateShader(GL_VERTEX_SHADER);
    glShaderSource(m_vert_shader, 1, &vertex_shader, NULL);
    glCompileShader(m_vert_shader);
    int params = -1;
    glGetShaderiv(m_vert_shader, GL_COMPILE_STATUS, &params);
    if (GL_TRUE != params) {
        ShaderLog(m_vert_shader);
        return false; // bool function: report failure as false, not 1
    }
    m_frag_shader = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(m_frag_shader, 1, &fragment_shader, NULL);
    glCompileShader(m_frag_shader);
    params = -1;
    glGetShaderiv(m_frag_shader, GL_COMPILE_STATUS, &params);
    if (GL_TRUE != params) {
        ShaderLog(m_frag_shader);
        return false;
    }
    m_shader_program = glCreateProgram();
    glAttachShader(m_shader_program, m_frag_shader);
    glAttachShader(m_shader_program, m_vert_shader);
    glLinkProgram(m_shader_program);
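    // Also verify that the program linked; checking compile status
    // alone does not catch link-time errors.
    params = -1;
    glGetProgramiv(m_shader_program, GL_LINK_STATUS, &params);
    if (GL_TRUE != params) {
        return false;
    }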

    this->m_model_matrix = glm::mat4(1.0f);
    m_model_matrix = glm::scale(m_model_matrix, glm::vec3(4.0f));
    // m_model_matrix = glm::rotate(m_model_matrix, glm::radians(10.0f), glm::vec3(0.0f));
    m_model_matrix = glm::translate(m_model_matrix, glm::vec3(0.0f));
    this->m_view_matrix = glm::mat4(1.0f);
    this->m_proj_matrix = glm::mat4(1.0f);

    GLuint model_mat_location = glGetUniformLocation(this->m_shader_program, "model_matrix");
    glUseProgram(this->m_shader_program);
    glUniformMatrix4fv(model_mat_location, 1, GL_FALSE, glm::value_ptr(this->m_model_matrix));
    return true;
}

void GameRenderer::Draw(Camera& camera)
{
    auto hands = tracker->getHands();

    if(hands.size()) {
        Point3f coordinates = hands[0].getHandPosition();
        glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
        GLfloat hand_coordinate[3] = { coordinates.x, coordinates.y, coordinates.z };
        std::cout << coordinates.x << " " << coordinates.y << " " << coordinates.z << std::endl;
        // glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 3, hand_coordinate, GL_DYNAMIC_DRAW);
        this->m_model_matrix = glm::mat4(1.0f);
        m_model_matrix = glm::scale(m_model_matrix, glm::vec3(10.0f));
        m_model_matrix = glm::translate(m_model_matrix, glm::vec3(coordinates.x, coordinates.y, coordinates.z));
        glUseProgram(this->m_shader_program);
        GLuint loc = glGetUniformLocation(this->m_shader_program, "model_matrix");
        glUniformMatrix4fv(loc, 1, GL_FALSE, glm::value_ptr(this->m_model_matrix));
        glUseProgram(0);
    }
    this->m_view_matrix = camera.GetViewMatrix();
    this->m_proj_matrix = glm::perspective(glm::radians(camera.GetFieldOfView()),
                                           60.0f/45.0f, camera.GetNearPlane(), camera.GetFarPlane());


    GLuint view_mat_location = glGetUniformLocation(this->m_shader_program, "view_matrix");
    glUseProgram(this->m_shader_program);
    glUniformMatrix4fv(view_mat_location, 1, GL_FALSE, glm::value_ptr(this->m_view_matrix));

    GLuint proj_mat_location = glGetUniformLocation(this->m_shader_program, "projection_matrix");
    glUseProgram(this->m_shader_program);
    glUniformMatrix4fv(proj_mat_location, 1, GL_FALSE, glm::value_ptr(this->m_proj_matrix));

    CheckGLError();
    glUseProgram(m_shader_program);
    glBindVertexArray(m_vao);
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
    // glPointSize(50.0f);
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glUseProgram(0);
    CheckGLError();
}

The coordinate values from the Kinect depth frame (these are the hand coordinates the Kinect tracks) are:

       x         y         z
-75.1628   136.374   650.176
-73.7582   141.239   665.259
-69.9152   148.691   702.163
-67.1218   151.315   723.226
-64.4887   153.013   745.074
-62.3365   153.783   767.393
-62.3365   153.783   767.393
-60.1887   152.979   789.881
-57.9948   150.762   812.395
-55.8624   146.575   834.187
-56.2673   133.543   873.035
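
Judging by the magnitudes, these values look like millimetres (a z of roughly 650-870 would be 0.65-0.87 m from the sensor), while my far plane is 1700. If a unit conversion is what I'm missing, I'd expect it to look something like this sketch (MM_TO_M is my own constant, not from any Kinect API):

#include <glm/glm.hpp>

static const float MM_TO_M = 0.001f; // assumed millimetre-to-metre factor

// Sketch: convert a hand position reported in millimetres into the
// metre-scale units my scene presumably uses, before building the
// model matrix from it.
glm::vec3 HandToSceneUnits(float x_mm, float y_mm, float z_mm)
{
    return glm::vec3(x_mm, y_mm, z_mm) * MM_TO_M;
}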

When the Kinect detects a hand, my geometry (currently a single triangle) stops rendering, even though it rendered fine before the hand was detected (when it sat at the origin).

What needs to be corrected in my view/projection matrices or in my code so that I can render in the same space as the Kinect?
