
I am trying to reconstruct fragment positions from the depth stored in a GL_DEPTH_ATTACHMENT texture. To do this, I linearize the depth and then multiply it by a ray from the camera position to the corresponding point on the far plane.

This approach is the second method described here. To get the rays from the camera to the far plane, I retrieve the rays to the four corners of the far plane, pass them to my vertex shader, and then interpolate them in the fragment shader. I am using the following code to get the rays from the camera to the corners of the far plane in world space.

std::vector<float> Camera::GetFlatFarFrustumCorners() {
    // rotation is the orientation of my camera in a quaternion.
    glm::quat inverseRotation = glm::inverse(rotation);
    glm::vec3 localUp = glm::normalize(inverseRotation * glm::vec3(0.0f, 1.0f, 0.0f));
    glm::vec3 localRight = glm::normalize(inverseRotation * glm::vec3(1.0f, 0.0f, 0.0f));
    float farHeight = 2.0f * tan(90.0f / 2) * 100.0f;
    float farWidth = farHeight * aspect;

    // 100.0f is the distance to the far plane. position is the location of the camera in world space.
    glm::vec3 farCenter = position + glm::vec3(0.0f, 0.0f, -1.0f) * 100.0f;
    glm::vec3 farTopLeft = farCenter + (localUp * (farHeight / 2)) - (localRight * (farWidth / 2));
    glm::vec3 farTopRight = farCenter + (localUp * (farHeight / 2)) + (localRight * (farWidth / 2));
    glm::vec3 farBottomLeft = farCenter - (localUp * (farHeight / 2)) - (localRight * (farWidth / 2));
    glm::vec3 farBottomRight = farCenter - (localUp * (farHeight / 2)) + (localRight * (farWidth / 2));

    return { 
        farTopLeft.x, farTopLeft.y, farTopLeft.z,
        farTopRight.x, farTopRight.y, farTopRight.z,
        farBottomLeft.x, farBottomLeft.y, farBottomLeft.z,
        farBottomRight.x, farBottomRight.y, farBottomRight.z
    };
}

Is this the correct way to retrieve the corners of the far plane in world space?

When I use these corners with my shaders, the results are incorrect, and what I get seems to be in view space. These are the shaders I am using:

Vertex shader:

layout(location = 0) in vec2 vp;
layout(location = 1) in vec3 textureCoordinates;

uniform vec3 farFrustumCorners[4];
uniform vec3 cameraPosition;

out vec2 st;
out vec3 frustumRay;

void main () {
    st = textureCoordinates.xy;
    gl_Position = vec4 (vp, 0.0, 1.0);
    frustumRay = farFrustumCorners[int(textureCoordinates.z)-1] - cameraPosition;
}

Fragment shader:

in vec2 st;
in vec3 frustumRay;

uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;

uniform vec3 cameraPosition;
uniform vec3 lightPosition;

out vec3 color;

void main () {
    // Far and near distances; Used to linearize the depth value.
    float f = 100.0;
    float n = 0.1;
    float depth = (2 * n) / (f + n - (texture(depthTexture, st).x) * (f - n));
    vec3 position = cameraPosition + (normalize(frustumRay) * depth);
    vec3 normal = texture(normalTexture, st).xyz;


    float k = 0.00001;
    vec3 distanceToLight = lightPosition - position;
    float distanceLength = length(distanceToLight);
    float attenuation = (1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength)));
    float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
    vec3 diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;

    vec3 gamma = vec3(1.0/2.2);
    color = pow(texture(colorTexture, st).xyz+diffuse, gamma);

    //color = texture(colorTexture, st);
    //colour.r = (2 * n) / (f + n - texture( tex, st ).x * (f - n));
    //colour.g = (2 * n) / (f + n - texture( tex, st ).y* (f - n));
    //colour.b = (2 * n) / (f + n - texture( tex, st ).z * (f - n));
}

This is what my scene's lighting looks like with these shaders: (screenshot: horrible lighting)

I am fairly sure this is the result of my reconstructed position being entirely wrong, or of it being in the wrong space. What is wrong with my reconstruction, and what can I do to fix it?

1 Answer

The first thing you want to do is develop a temporary addition to your G-Buffer setup that stores the initial position of each fragment in world/view space (really, whatever space you are trying to reconstruct here). Then write a shader that does nothing except reconstruct those positions from the depth buffer. Set everything up so that half of the screen displays the original G-Buffer position and the other half displays your reconstructed position. You should be able to spot discrepancies immediately this way.
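
A minimal sketch of such a comparison pass might look like the following. The positionTexture (world-space positions written during the G-Buffer pass) and the invViewProjection uniform are assumptions for this test shader, and the brute-force unproject is only a reference, not the frustum-ray reconstruction being debugged:

#version 150 core

in  vec2 st;                        // full-screen quad UV in [0, 1]
out vec3 color;

uniform sampler2D positionTexture;  // world-space positions stored in the G-Buffer
uniform sampler2D depthTexture;     // hardware depth buffer

uniform mat4 invViewProjection;     // inverse (projection * view) used to build the G-Buffer

// Reference reconstruction: unproject (uv, depth) from NDC back to world space.
vec3 reconstruct_position (vec2 uv, float depth)
{
    vec4 ndc = vec4 (uv * 2.0 - 1.0, depth * 2.0 - 1.0, 1.0);
    vec4 pos = invViewProjection * ndc;
    return pos.xyz / pos.w;
}

void main ()
{
    vec3 stored        = texture (positionTexture, st).xyz;
    vec3 reconstructed = reconstruct_position (st, texture (depthTexture, st).x);

    // Left half of the screen shows the stored positions, the right half the
    // reconstruction; a visible seam down the middle means the two disagree.
    // fract() keeps large world coordinates visible as a repeating pattern.
    color = fract ((st.x < 0.5) ? stored : reconstructed);
}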

That said, you may want to take a look at an implementation I have used in the past to reconstruct (object-space) positions from the depth buffer. It basically gets you into view space first, and then uses the inverse modelview matrix to go to object space. You can easily adapt it for world space. It is probably not the most flexible implementation, since the FOV is hard-coded, but you can easily modify it to use uniforms instead...

Trimmed fragment shader:

flat in mat4 inv_mv_mat;
     in vec2 uv;

...

float linearZ (float z)
{
#ifdef INVERT_NEAR_FAR
  const float f = 2.5;
  const float n = 25000.0;
#else
  const float f = 25000.0;
  const float n = 2.5;
#endif

  return n / (f - z * (f - n)) * f;
}

vec4
reconstruct_pos (float depth)
{
  depth = linearZ (depth);

  vec4 pos = vec4 (uv * depth, -depth, 1.0); 
  vec4 ret = (inv_mv_mat * pos);

  return ret / ret.w;
}
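
The world-space adaptation mentioned above amounts to transforming the same view-space position by the inverse of the view matrix alone instead of the inverse modelview. A rough sketch, reusing linearZ () and uv from the shader above and assuming a hypothetical inv_view_mat uniform (the inverse of just the camera/view matrix, with no per-object model transform):

uniform mat4 inv_view_mat; // hypothetical: inverse (view), no model transform

vec4
reconstruct_world_pos (float depth)
{
  depth = linearZ (depth);

  // Same view-space position as reconstruct_pos (), different target space.
  vec4 pos = vec4 (uv * depth, -depth, 1.0);
  vec4 ret = (inv_view_mat * pos);

  return ret / ret.w;
}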

Some additional setup is needed in the vertex shader stage of the deferred shading lighting pass, which looks like this:

#version 150 core

in       vec4 vtx_pos;
in       vec2 vtx_st;

uniform  mat4 modelview_mat; // Matrix used when the G-Buffer was built
uniform  mat4 camera_matrix; // Matrix used to stretch the G-Buffer over the viewport

uniform float buffer_res_x;
uniform float buffer_res_y;

     out vec2 tex_st;
flat out mat4 inv_mv_mat;
     out vec2 uv;


// Hard-Coded 45 degree FOV
//const float fovy = 0.78539818525314331; // NV pukes on the line below!
//const float fovy = radians (45.0);
//const float tan_half_fovy = tan (fovy * 0.5);

const float   tan_half_fovy = 0.41421356797218323;

      float   aspect        = buffer_res_x / buffer_res_y;
      vec2    inv_focal_len = vec2 (tan_half_fovy * aspect,
                                    tan_half_fovy);

const vec2    uv_scale     = vec2 (2.0, 2.0);
const vec2    uv_translate = vec2 (1.0, 1.0);


void main (void)
{
  inv_mv_mat  = inverse (modelview_mat);
  tex_st      = vtx_st;
  gl_Position = camera_matrix * vtx_pos;
  uv          = (vtx_st * uv_scale - uv_translate) * inv_focal_len;
}

The depth-range inversion is something you may find useful for deferred shading. Ordinarily a perspective depth buffer gives you far more precision than you need at close range and not enough at a distance for quality reconstruction. If you flip things on their head by inverting the depth range, you can even things out a bit while still using a hardware depth buffer. This is discussed in detail here.
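
As a rough sketch of what that inversion can look like on the application side, assuming GLM, the same 2.5 / 25000.0 near/far constants as linearZ () above, and whatever GL header or loader your project already uses:

#include <GL/gl.h>                       // or your GL loader of choice
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Build the projection with near/far swapped so that window-space depth runs
// from 1.0 at the near plane down to 0.0 at the far plane, matching the
// INVERT_NEAR_FAR branch of linearZ () above.
glm::mat4 ReversedDepthProjection (float aspect) {
    return glm::perspective (glm::radians (45.0f), aspect, 25000.0f, 2.5f);
}

// With the range inverted, "far" now clears to 0.0 and closer fragments have
// larger depth values, so the clear value and depth test flip as well. The
// precision win is modest with a fixed-point depth buffer; a 32-bit floating
// point depth attachment benefits the most.
void SetupReversedDepthState () {
    glClearDepth (0.0);
    glDepthFunc  (GL_GREATER);
}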

Answered 2013-11-03T06:42:27.363