
I'm trying to write a ray tracer for arbitrary objects formed by triangle meshes. I use an external library to load a cube from a .ply file and then trace it. So far I have implemented most of the tracer, and now I'm testing it with a single cube, but for some reason all I see on the screen is a red line. I have tried several ways to fix it, but I just can't figure it out. For this initial test I only generate primary rays; if a ray hits my cube, I shade that pixel with the cube's diffuse color and return. To check a ray's intersection with an object, I iterate over all the triangles that form the object and return the distance to the closest one. It would be great if you could look at the code and tell me what might be going wrong and where. I would really appreciate it.

Ray-triangle intersection:

// Möller–Trumbore ray-triangle intersection.
// Returns true if the ray hits the triangle (v0, v1, v2); on success, t is the
// distance along the ray and (u, v) are the barycentric coordinates of the hit.
bool intersectTri(const Vec3D& ray_origin, const Vec3D& ray_direction, const Vec3D& v0, const Vec3D& v1, const Vec3D& v2, double &t, double &u, double &v) const
{
    Vec3D edge1 = v1 - v0;
    Vec3D edge2 = v2 - v0;
    Vec3D pvec = ray_direction.cross(edge2);
    double det = edge1.dot(pvec);
    // Ray is (nearly) parallel to the triangle plane.
    if (det > -THRESHOLD && det < THRESHOLD)
        return false;
    double invDet = 1/det;
    Vec3D tvec = ray_origin - v0;
    u = tvec.dot(pvec)*invDet;
    if (u < 0 || u > 1)
        return false;
    Vec3D qvec = tvec.cross(edge1);
    v = ray_direction.dot(qvec)*invDet;
    if (v < 0 || u + v > 1)
        return false;
    t = edge2.dot(qvec)*invDet;
    // Reject intersections behind the ray origin.
    if (t < 0)
        return false;
    return true;
}
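Since the (u, v) pair returned here are barycentric coordinates (u weights v1, v weights v2), they can be used to interpolate any per-vertex attribute at the hit point, which is exactly what the commented-out normal interpolation further down does. A minimal helper, assuming only the Vec3D operators already used above:

// Interpolate a per-vertex attribute (position, normal, color, ...) at the hit
// point from the barycentric (u, v) returned by intersectTri.
// a0, a1, a2 are the attribute values at v0, v1, v2 respectively.
Vec3D interpolateAttribute(const Vec3D& a0, const Vec3D& a1, const Vec3D& a2,
                           double u, double v)
{
    return a0 + (a1 - a0)*u + (a2 - a0)*v;
}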

// Object intersection: test the ray against every triangle of this mesh and
// keep the closest hit. If enforce_max is true, the incoming idata.t is used
// as an upper bound on the accepted hit distance.
bool intersect(const Vec3D& ray_origin, const Vec3D& ray_direction, IntersectionData& idata, bool enforce_max) const
{
    double tClosest;
    if (enforce_max)
    {
        tClosest = idata.t;
    }
    else
    {
        tClosest = TMAX;
    }

    for (int i = 0 ; i < indices.size() ; i++)
    {
        const Vec3D v0 = vertices[indices[i][0]];
        const Vec3D v1 = vertices[indices[i][1]];
        const Vec3D v2 = vertices[indices[i][2]];
        double t, u, v;
        if (intersectTri(ray_origin, ray_direction, v0, v1, v2, t, u, v))
        {
            // Keep the nearest triangle hit so far.
            if (t < tClosest)
            {
                idata.t = t;
                tClosest = t;
                idata.u = u;
                idata.v = v;
                idata.index = i;
            }
        }
    }
    return tClosest < TMAX && tClosest > 0;
}
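Since intersect() takes an enforce_max flag that caps the accepted hit distance, the same routine can later double as a shadow-ray test. A minimal sketch under a few assumptions: the helper name inShadow, the EPSILON offset, and Vec3D::length() are mine and not part of the posted code. The occlusion check looks at whether sdata.t decreased rather than at the return value, because as written intersect() returns true whenever the initial bound lies in (0, TMAX), hit or not.

// Hypothetical shadow test: is any triangle of 'obj' between 'point' and the
// light? EPSILON offsets the origin to avoid self-intersection; Vec3D::length()
// is assumed to exist (only normalize() appears in the posted code).
const double EPSILON = 1e-6;

bool inShadow(const Object& obj, const Vec3D& point, const Vec3D& lightPos)
{
    Vec3D toLight = lightPos - point;
    double distToLight = toLight.length();
    toLight.normalize();

    IntersectionData sdata;
    sdata.t = distToLight - EPSILON;   // enforce_max: ignore hits past the light
    obj.intersect(point + toLight*EPSILON, toLight, sdata, true);

    // intersect() lowers sdata.t only when it finds a closer triangle, so a
    // value below the initial bound means something occludes the light.
    return sdata.t < distToLight - EPSILON;
}

Hooked into the diffuse shading later on, this would darken points that face the light but are blocked by other geometry.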

// Trace a primary ray: find the nearest object hit and return its diffuse
// color, or the background color if nothing is hit.
Vec3D trace(World world, Vec3D &ray_origin, Vec3D &ray_direction)
{
    Vec3D objColor = world.background_color;
    IntersectionData idata;
    double coeff = 1.0;
    int depth = 0;

    double tClosest = TMAX;
    Object *hitObject = NULL;
    for (unsigned int i = 0 ; i < world.objs.size() ; i++)
    {
        IntersectionData idata_curr;
        if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false))
        {
            // Keep the closest object along the ray.
            if (idata_curr.t < tClosest && idata_curr.t > 0)
            {
                idata.t = idata_curr.t;
                idata.u = idata_curr.u;
                idata.v = idata_curr.v;
                idata.index = idata_curr.index;
                tClosest = idata_curr.t;
                hitObject = &(world.objs[i]);
            }
        }
    }
    if (hitObject == NULL)
    {
        return world.background_color;
    }
    else
    {
        return hitObject->getDiffuse();
    }
}

int main(int argc, char** argv)
{
    parse("cube.ply");
    Vec3D diffusion1(1, 0, 0);
    Vec3D specular1(1, 1, 1);
    Object cube1(coordinates, connected_vertices, diffusion1, specular1, 0, 0);
    World wrld;
    // Add objects to the world
    wrld.objs.push_back(cube1);
    Vec3D background(0, 0, 0);
    wrld.background_color = background;
    // Set light color
    Vec3D light_clr(1, 1, 1);
    wrld.light_colors.push_back(light_clr);
    // Set light position
    Vec3D light(0, 64, -10);
    wrld.light_positions.push_back(light);

    int width = 128;
    int height = 128;
    Vec3D *image = new Vec3D[width*height];
    Vec3D *pixel = image;

    // Trace rays
    for (int y = -height/2 ; y < height/2 ; ++y)
    {
        for (int x = -width/2 ; x < width/2 ; ++x, ++pixel)
        {
            Vec3D ray_dir(x+0.5, y+0.5, -1.0);
            ray_dir.normalize();
            Vec3D ray_orig(0.5*width, 0.5*height, 0.0);
            *pixel = trace(wrld, ray_orig, ray_dir);
        }
    }

    savePPM("./test.ppm", image, width, height);
    return 0;
}

I just ran a test case and this is what I got:

This is for a unit cube centered at (0, 0, -1.5) and scaled by 100 along the X and Y axes. Something seems to be off with the projection, but I can't tell exactly what from the result. Also, in this case (the cube centered at (0, 0)), shouldn't the final object appear in the middle of the picture as well? FIX: I solved the centering problem by doing ray_dir = ray_dir - ray_orig before normalizing and before calling the trace function. The perspective still looks completely wrong, though.
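For comparison, here is one common way to build primary rays with the camera at the origin looking down -Z, which matches the fix described above (what gets normalized is a direction relative to the camera, not a pixel position). This is only a sketch of a standard pinhole-camera setup, not the code that produced the image; the field of view and the aspect-ratio handling are assumptions:

#include <cmath>

// Sketch of pinhole-camera ray generation: pixel (px, py) is mapped to
// normalized device coordinates in [-1, 1], scaled by tan(fov/2) and the
// aspect ratio, and placed on an image plane one unit in front of the camera.
void makePrimaryRay(int px, int py, int width, int height,
                    Vec3D& ray_orig, Vec3D& ray_dir)
{
    const double PI = 3.14159265358979323846;
    const double fov = 60.0 * PI / 180.0;        // assumed vertical field of view
    const double aspect = double(width) / height;
    const double scale = std::tan(fov * 0.5);

    double cam_x = (2.0 * (px + 0.5) / width - 1.0) * aspect * scale;
    double cam_y = (1.0 - 2.0 * (py + 0.5) / height) * scale;

    ray_orig = Vec3D(0.0, 0.0, 0.0);             // camera at the origin
    ray_dir = Vec3D(cam_x, cam_y, -1.0);         // a direction, not a point
    ray_dir.normalize();
}

With the camera at the origin, a cube centered on the Z axis lands in the middle of the image, which is the centering behaviour the question expects.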


1 Answer


I kept working on it, and I have now started implementing diffuse reflection according to Phong.

Vec3D trace(World world, Vec3D &ray_origin, Vec3D &ray_direction)
{
    Vec3D objColor = Vec3D(0);
    IntersectionData idata;
    double coeff = 1.0;
    int depth = 0;
    do
    {
        // Find the closest object hit by the ray.
        double tClosest = TMAX;
        Object *hitObject = NULL;
        for (unsigned int i = 0 ; i < world.objs.size() ; i++)
        {
            IntersectionData idata_curr;
            if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false))
            {
                if (idata_curr.t < tClosest && idata_curr.t > 0)
                {
                    idata.t = idata_curr.t;
                    idata.u = idata_curr.u;
                    idata.v = idata_curr.v;
                    idata.index = idata_curr.index;
                    tClosest = idata_curr.t;
                    hitObject = &(world.objs[i]);
                }
            }
        }
        if (hitObject == NULL)
        {
            return world.background_color;
        }

        // Intersection point.
        Vec3D newStart = ray_origin + ray_direction*idata.t;

        // Compute normal at intersection by interpolating vertex normals (Phong idea)
        Vec3D v0 = hitObject->getVertices()[hitObject->getIndices()[idata.index][0]];
        Vec3D v1 = hitObject->getVertices()[hitObject->getIndices()[idata.index][1]];
        Vec3D v2 = hitObject->getVertices()[hitObject->getIndices()[idata.index][2]];

        Vec3D n1 = hitObject->getNormals()[hitObject->getIndices()[idata.index][0]];
        Vec3D n2 = hitObject->getNormals()[hitObject->getIndices()[idata.index][1]];
        Vec3D n3 = hitObject->getNormals()[hitObject->getIndices()[idata.index][2]];

    //  Vec3D N = n1 + (n2 - n1)*idata.u + (n3 - n1)*idata.v;
        // For now, use the face normal, flipped to face the incoming ray.
        Vec3D N = v0.computeFaceNrm(v1, v2);
        if (ray_direction.dot(N) > 0)
        {
            N = N*(-1);
        }
        N.normalize();

        Vec3D lightray_origin = newStart;

        // Lambertian (diffuse) contribution from each light.
        for (unsigned int itr = 0 ; itr < world.light_positions.size() ; itr++)
        {
            Vec3D lightray_dir = world.light_positions[itr] - newStart;
            lightray_dir.normalize();

            double cos_theta = max(N.dot(lightray_dir), 0.0);
            objColor.setX(objColor.getX() + hitObject->getDiffuse().getX()*hitObject->getDiffuseReflection()*cos_theta);
            objColor.setY(objColor.getY() + hitObject->getDiffuse().getY()*hitObject->getDiffuseReflection()*cos_theta);
            objColor.setZ(objColor.getZ() + hitObject->getDiffuse().getZ()*hitObject->getDiffuseReflection()*cos_theta);
            return objColor;
        }

        depth++;

    } while(coeff > 0 && depth < MAX_RAY_DEPTH);
    return objColor;
}

When a primary ray hits an object, I send another ray toward the light source located at (0, 0, 0) and return the diffuse color according to the Phong illumination model, but the result is really not what I expected: http://s15.postimage.org/vc6uyyssr/test.png. The cube is a unit cube centered at (0, 0, 0) and then translated by (1.5, -1.5, -1.5). From my point of view, the left side of the cube should receive more light, and it does. What do you think?
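Two things the loop above leaves out are the light color (world.light_colors is filled in main() but never read) and any shadow test, and the early return means only the first light can ever contribute. A minimal sketch of the per-light diffuse term, using only accessors that appear in the posted code; the function name and the component-wise color multiply are my own:

#include <algorithm>  // std::max

// Sketch: Lambertian contribution of a single point light, modulated by the
// light color and the material's diffuse reflectance. Accumulating this for
// every light (and returning only after the loop) lets all lights contribute.
Vec3D diffuseFromLight(Object& obj, const Vec3D& hitPoint, const Vec3D& N,
                       const Vec3D& lightPos, const Vec3D& lightColor)
{
    Vec3D L = lightPos - hitPoint;
    L.normalize();

    double cos_theta = std::max(N.dot(L), 0.0);   // Lambert's cosine law
    double kd = obj.getDiffuseReflection();

    // Component-wise product of material diffuse color and light color.
    return Vec3D(obj.getDiffuse().getX() * lightColor.getX() * kd * cos_theta,
                 obj.getDiffuse().getY() * lightColor.getY() * kd * cos_theta,
                 obj.getDiffuse().getZ() * lightColor.getZ() * kd * cos_theta);
}

Combined with the shadow test sketched earlier, faces that point toward the light but are occluded would go dark as well, which makes it much easier to judge whether the lighting direction is actually right.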

Answered 2012-11-21T17:20:45.767