
We currently have a (curved) line made up of points in 3D space (this is in Open Inventor, but the solution to the problem is probably product-agnostic).

We want to draw a surface describing a ribbon that projects orthogonally from that line and always faces the camera as the view rotates.

(For example, the user would see a ribbon with the line itself as one of its edges, tracing the curve of the line; as we rotate around the scene, the ribbon "rotates" around the line so that it always faces the user.)

We have:

  • the camera's position and rotation

  • the positions of the points along the line

One approach we considered: for each pair of adjacent points, find the line where the plane orthogonal to the camera direction intersects the plane orthogonal to the segment between those points, then project a set distance along that intersection line. This does not give us anything close to the right result, though. (Broken code below.)

Is this approach sound, and if so, what is wrong with the code below, which is supposed to implement it?

If not, how can we achieve the core goal (a ribbon that always faces the user)?

SbVec3f getOuterPoint(SbVec3f p3, const float ribbon_width, float cosine, float sine)
{
    return SbVec3f(p3[0] + ribbon_width*cosine, p3[1] - ribbon_width*sine, p3[2]);
}

SbVec3f getOuterPoint(SbVec3f old_p3, SbVec3f other_point, const float ribbon_width)
{
    float oangle = atan2(old_p3[1] - other_point[1], old_p3[0] - other_point[0]);
    float ocosine = cosf(oangle);
    float osine = sinf(oangle);
    return getOuterPoint(old_p3, ribbon_width, ocosine, osine);
}

SbVec3f getOuterPoint(SbVec3f p0, SbVec3f p1, const float ribbon_width, SoCamera* camera)
{
    SbVec3f axis;
    float angle;
    SoSFRotation camera_rotation = camera->orientation;
    camera_rotation.getValue(axis, angle);
    //std::cout << axis[0] << " " << axis[1] << " " << axis[2] << ":" << angle << std::endl;

    const SbVec3f &camera_position = camera->position.getValue();

    SbVec3f camera_axis = axis;

    SbVec3f well_axis = p1 - p0;

    well_axis.normalize();
    camera_axis.normalize();

    float cam_constant[3] = { -p1[0], -p1[1], -p1[2] };
    float well_constant[3] = { -p1[0], -p1[1], -p1[2] };

    /*

    //float p1_constant = camera_axis[0] * p1[0] + camera_axis[1] * p1[1] + camera_axis[2] * p1[2]
    //  - (camera_axis[0] * camera_position[0] + camera_axis[1] * camera_position[1] + camera_axis[2] * camera_position[2]);

    //// X, Y, Z are unknown
    //float line_unknown = camera_axis[0] * X + camera_axis[1] * Y + camera_axis[2] * Z;

    // 
    //
    // camera_axis.x * (x - p1[0]) + camera_axis.y * (y - p1[1]) + camera_axis.z * (z - p1[2]) = 0      (1)
    // well_axis.x   * (x - p1[0]) + well_axis.y   * (y - p1[1]) + well_axis.z   * (z - p1[2]) = 0      (2)

    // let z become free variable t

    // camera_axis.x * (x - p1[0]) + camera_axis.y * (y - p1[1]) = - camera_axis.z * (t - p1[2]) 
    // well_axis.x   * (x - p1[0]) + well_axis.y   * (y - p1[1]) = - well_axis.z   * (t - p1[2]) 

    // camera_axis.x * (x - p1[0]) + camera_axis.y * (y - p1[1]) = - camera_axis.z * t - camera_axis.z * p1[2] 
    // well_axis.x * (x - p1[0]) + well_axis.y * (y - p1[1]) = - well_axis.z * t - well_axis.z * p1[2] 

    // camera_axis.x * x  + camera_axis.y * y  = - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1] (3)
    // well_axis.x * x  + well_axis.y * y  = - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]               (4)

    (3) * well_axis.x:

    well_axis.x * camera_axis.x * x  + well_axis.x * camera_axis.y * y  = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1])

    (4) * camera_axis.x
    camera_axis.x * well_axis.x * x  + camera_axis.x * well_axis.y * y  = camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1])

    Subtracting
    well_axis.x * camera_axis.y * y - camera_axis.x * well_axis.y * y  = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1])

    (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y) * y = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1])
    y = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]) / (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y)


    (3) * well_axis.y
    well_axis.y * camera_axis.x * x  + well_axis.y * camera_axis.y * y  = well_axis.y * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1])
    (4) * camera_axis.y
    camera_axis.y * well_axis.x * x  + camera_axis.y * well_axis.y * y  = camera_axis.y * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1])

    Subtracting
    x = well_axis.y * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.y * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]) / well_axis.y * camera_axis.x  - camera_axis.y * well_axis.x


    So:
    x = well_axis.y * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.y * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]) / (well_axis.y * camera_axis.x - camera_axis.y * well_axis.x)
    y = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]) / (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y)
    z = t


    x = ((well_axis.z * camera_axis.y - camera_axis.z * well_axis.y) * t
    - camera_axis.z * well_axis.y * p1[2]
    + camera_axis.x  * well_axis.y * p1[0]
    + well_axis.z * camera_axis.y * p1[2]
    - well_axis.x * camera_axis.y * p1[0] ) 
    / (well_axis.y * camera_axis.x - camera_axis.y * well_axis.x)

    y =  ( - camera_axis.z * well_axis.x * t - camera_axis.z * well_axis.x * p1[2] + camera_axis.x  * well_axis.x * p1[0] + camera_axis.y  * well_axis.x * p1[1] + well_axis.z * camera_axis.x * t + well_axis.z * camera_axis.x * p1[2] - well_axis.x  * camera_axis.x * p1[0] - well_axis.y * camera_axis.x * p1[1]) / (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y)


    y =  ((well_axis.z * camera_axis.x - camera_axis.z * well_axis.x) * t 
    - camera_axis.z * well_axis.x * p1[2]
    + camera_axis.y  * well_axis.x * p1[1] 
    + well_axis.z * camera_axis.x * p1[2]
    - well_axis.y * camera_axis.x * p1[1])
    / (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y)


    // Given these two equations, we now have a parameterised equation

    // (x,y,z) = (mt + a, nt + b, t) = (m, n, 1)t + (a + b + 0)
    // 
    // m = ((well_axis[2] * camera_axis[1] - camera_axis[2] * well_axis[1])) / (well_axis[1] * camera_axis[0] - camera_axis[1] * well_axis[0])
    // 
    // n = ((well_axis[2] * camera_axis[0] - camera_axis[2] * well_axis[0])) / (well_axis[0] * camera_axis[1] - camera_axis[0] * well_axis[1])
    // 
    // a = (- camera_axis[2] * well_axis[1] * p1[2] + camera_axis[0] * well_axis[1] * p1[0] + well_axis[2] * camera_axis[1] * p1[2] - well_axis[0] * camera_axis[1] * p1[0]) / (well_axis[1] * camera_axis[0] - camera_axis[1] * well_axis[0])
    // 
    // b = (- camera_axis[2] * well_axis[0] * p1[2] +camera_axis[1] * well_axis[0] * p1[1]  + well_axis[2] * camera_axis[0] * p1[2] - well_axis[1] * camera_axis[0] * p1[1]) / (well_axis[0] * camera_axis[1] - camera_axis[0] * well_axis[1])
*/

    float m = ((well_axis[2] * camera_axis[1] - camera_axis[2] * well_axis[1])) / (well_axis[1] * camera_axis[0] - camera_axis[1] * well_axis[0]);
    float n = ((well_axis[2] * camera_axis[0] - camera_axis[2] * well_axis[0])) / (well_axis[0] * camera_axis[1] - camera_axis[0] * well_axis[1]);
    float a = (-camera_axis[2] * well_axis[1] * p1[2] + camera_axis[0] * well_axis[1] * p1[0] + well_axis[2] * camera_axis[1] * p1[2] - well_axis[0] * camera_axis[1] * p1[0]) / (well_axis[1] * camera_axis[0] - camera_axis[1] * well_axis[0]);
    float b = (-camera_axis[2] * well_axis[0] * p1[2] + camera_axis[1] * well_axis[0] * p1[1] + well_axis[2] * camera_axis[0] * p1[2] - well_axis[1] * camera_axis[0] * p1[1]) / (well_axis[0] * camera_axis[1] - camera_axis[0] * well_axis[1]);

    float t = 2;

    return SbVec3f(m * t + a, n * t + b, t);
}

void setVertices(WellBore * pWell, SoVertexProperty * vertex_property, SoCamera* camera)
{
    int nPoints = pWell->nPoints;

    const float ribbon_width = 50.0f;

    int vertex_index = 0;
    int face_index = 0;

    int max_to_draw = nPoints;
    vertex_property->vertex.deleteValues(max_to_draw);

    SbVec3f on_well0x = pWell->points[1];
    SbVec3f in_space0x = getOuterPoint(pWell->points[0], on_well0x, ribbon_width, camera);

    for (int i = 0; i < max_to_draw - 1; ++i)
    {
        SbVec3f on_well0 = pWell->points[i];
        SbVec3f on_well1 = pWell->points[i + 1];

        SbVec3f in_space1 = getOuterPoint(on_well0, on_well1, ribbon_width, camera);

        vertex_property->vertex.set1Value(vertex_index + 0, in_space0x);
        vertex_property->vertex.set1Value(vertex_index + 1, on_well0x);
        vertex_property->vertex.set1Value(vertex_index + 2, on_well1);
        vertex_property->vertex.set1Value(vertex_index + 3, in_space0x);
        vertex_property->vertex.set1Value(vertex_index + 4, in_space1);

        vertex_index += 5;

        on_well0x = on_well1;
        in_space0x = in_space1;
    }
}

void cameraDebug(SoXtViewer * myViewer, WellBore* pWell)
{
    SoCamera* camera = myViewer->getCamera();

    SbVec3f camera_position = camera->position.getValue();
    //std::cout << camera_position[0] << " " << camera_position[1] << " " << camera_position[2] << std::endl;

    SbVec3f axis;
    float angle;
    SoSFRotation camera_rotation = camera->orientation;
    camera_rotation.getValue(axis, angle);
    //std::cout << axis[0] << " " << axis[1] << " " << axis[2] << ":" << angle << std::endl;

    SoNode* node = SoNode::getByName(SbName("points"));
    SbString str;
    SoVertexProperty* vertices = static_cast<SoVertexProperty*>(static_cast<SoVertexShape*>(node)->vertexProperty.getValue());
    //std::cout << vertices->vertex.getNum() << str << std::endl;

    setVertices(pWell, vertices, camera);
}

1 Answer


A narrow, specific answer to your question is relatively easy. The approach stated in your question sounds correct, but the code you posted seems far too complicated... In short: you have one vector defined by two consecutive points along the ribbon's path, and a second vector defined by the camera orientation. The vector you need is simply the cross product of those two vectors. The camera position does not matter, only the orientation. In Open Inventor you compute the camera direction by applying the current camera orientation to the default direction vector, like this:

// Camera (reverse) direction vector
SbVec3f defVec(0,0,1), camVec;
const SbRotation& camRot = camera->orientation.getValue();
camRot.multVec( defVec, camVec );
camVec.normalize();
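(The default vector (0,0,1) here is the reverse of the view direction: an Open Inventor camera looks down its local -Z axis, and the orientation field rotates the camera away from that default pose, so rotating +Z by the orientation gives a vector pointing from the scene back toward the viewer, which is the side the ribbon should face.)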

If "verts" is the ribbon path, then for each segment of the path you have two coordinates and can compute two additional offset coordinates, so that the four coordinates define a user-facing quad, for example:

SbVec3f ribVec, orthoVec;
for (int i = 0; i < numSegs; ++i) {
  ribVec = verts[i+1] - verts[i];
  ribVec.normalize();
  orthoVec = camVec.cross(ribVec);
  orthoVec.normalize();
  verts2[i*4  ] = verts[i];  // i*4 because 4 verts per segment
  verts2[i*4+1] = verts[i+1];
  verts2[i*4+2] = verts[i+1] + ribbonWidth * orthoVec;
  verts2[i*4+3] = verts[i  ] + ribbonWidth * orthoVec;
}
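To actually display the quads, a minimal hookup might look like the sketch below. This is not from the original answer; it assumes verts2 was filled as above, numSegs is the segment count, and root is your scene graph root:

// Sketch only: wrap the offset coordinates in a standard SoFaceSet.
SoVertexProperty* vp = new SoVertexProperty;
vp->vertex.setValues(0, numSegs * 4, verts2);       // 4 verts per segment

SoFaceSet* ribbon = new SoFaceSet;
ribbon->vertexProperty = vp;

std::vector<int32_t> counts(numSegs, 4);            // one quad per segment
ribbon->numVertices.setValues(0, numSegs, counts.data());

root->addChild(ribbon);                             // 'root' assumed to exist

Since the ribbon's front face flips as the camera moves, an SoShapeHints node with two-sided lighting in front of the shape is probably worth adding.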

Now you can work on the harder part: how to handle the "joints" between these quads so the ribbon looks good...
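One common way to handle those joints (again a sketch, not the original answer's code; the central-difference tangent is my assumption) is to compute a single offset direction per path vertex, using the average tangent of the two segments that meet there, so adjacent quads share an edge at each joint:

// Sketch: one offset point per path vertex instead of two per segment.
// Assumes numPoints >= 2 and camVec computed as above.
std::vector<SbVec3f> offset(numPoints);
for (int i = 0; i < numPoints; ++i) {
  SbVec3f dir;
  if (i == 0)                  dir = verts[1] - verts[0];
  else if (i == numPoints - 1) dir = verts[i] - verts[i - 1];
  else                         dir = verts[i + 1] - verts[i - 1]; // average tangent
  dir.normalize();
  SbVec3f ortho = camVec.cross(dir);
  ortho.normalize();
  offset[i] = verts[i] + ribbonWidth * ortho;
}

Interleaving verts[i] and offset[i] then gives vertices suitable for an SoTriangleStripSet (or an SoQuadMesh with two rows), which avoids the gaps and overlaps that independent per-segment quads produce at bends.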

Answered 2016-06-10T19:10:40.223