
I'm currently writing a cel-shading shader, but I've run into a problem with edge detection. Right now I'm using the following code, which runs Laplacian edge detection on non-linear depth-buffer values:

uniform sampler2D depth_tex;

void main(){
    vec4 color_out = vec4(1.0);    // initialized here; in the full shader this is presumably the cel-shaded scene color
    float znear = 1.0;
    float zfar = 50000.0;
    float depthm = texture2D(depth_tex, gl_TexCoord[0].xy).r;
    // make the lines thicker at close range
    float lineAmp = mix( 0.001, 0.0, clamp( (500.0 / (zfar + znear - ( 2.0 * depthm - 1.0 ) * (zfar - znear) )/2.0), 0.0, 1.0 ) );

    float depthn = texture2D(depth_tex, gl_TexCoord[0].xy + vec2( (0.002 + lineAmp)*0.625 , 0.0) ).r;
    depthn = depthn / depthm;

    float depths = texture2D(depth_tex, gl_TexCoord[0].xy - vec2( (0.002 + lineAmp)*0.625 , 0.0) ).r;
    depths = depths / depthm;

    float depthw = texture2D(depth_tex, gl_TexCoord[0].xy + vec2(0.0 , 0.002 + lineAmp) ).r;
    depthw = depthw / depthm;

    float depthe = texture2D(depth_tex, gl_TexCoord[0].xy - vec2(0.0 , 0.002 + lineAmp) ).r;
    depthe = depthe / depthm;

    // 5-tap Laplacian of the ratio-normalized depth samples
    float Contour = -4.0 + depthn + depths + depthw + depthe;

    // boost the response at far range (the hackish part)
    float lineAmp2 = 100.0 * clamp( depthm - 0.99, 0.0, 1.0);
    lineAmp2 = lineAmp2 * lineAmp2;
    Contour = (512.0 + lineAmp2 * 204800.0 ) * Contour;

    if(Contour > 0.15){
        Contour = (0.15 - Contour) / 1.5 + 0.5;
    } else {
        Contour = 1.0;
    }

    color_out.rgb = color_out.rgb * Contour;
    color_out.a = 1.0;
    gl_FragColor = color_out;
}
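
For reference, the five-tap stencil above is just the standard discrete Laplacian kernel,

 0  1  0
 1 -4  1
 0  1  0

applied to the ratios depthX / depthm (the center term is -4.0 because depthm / depthm == 1.0).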

But it's hackish [note lineAmp2], and it loses detail at far distances. So I made up a different algorithm:

[note that Laplacian edge detection is still in use]

1. Take 5 samples from the depth buffer: depthm, depthn, depths, depthw, depthe, where depthm lies at the fragment being processed, depthn slightly above it, depths slightly below it, and so on.

2. Compute their real camera-space coordinates [which includes converting them to linear depth].

3. Compare the side samples against the middle one by subtraction, normalize each difference by dividing by the distance between the two camera-space points, and sum all four results. In theory this should handle the case where, far from the camera, two fragments are very close on screen but very far apart in camera space, which is fatal for a plain linear depth test.

Where:

2.a Convert the non-linear depth to linear depth using the algorithm from http://stackoverflow.com/questions/6652253/getting-the-true-z-value-from-the-depth-buffer — exact code:

uniform sampler2D depthBuffTex;
uniform float zNear;
uniform float zFar;
varying vec2 vTexCoord;
void main(void)
{
    float z_b = texture2D(depthBuffTex, vTexCoord).x;
    float z_n = 2.0 * z_b - 1.0;
    float z_e = 2.0 * zNear * zFar / (zFar + zNear - z_n * (zFar - zNear));
}
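
That snippet computes z_e but never writes anything out; for sanity-checking, it can be extended to visualize the linear depth (a minimal sketch, with the remap to [0,1] purely for display):

uniform sampler2D depthBuffTex;
uniform float zNear;
uniform float zFar;
varying vec2 vTexCoord;
void main(void)
{
    float z_b = texture2D(depthBuffTex, vTexCoord).x;    // raw depth in [0,1]
    float z_n = 2.0 * z_b - 1.0;                         // back to NDC in [-1,1]
    float z_e = 2.0 * zNear * zFar / (zFar + zNear - z_n * (zFar - zNear));    // eye-space depth
    gl_FragColor = vec4( vec3( (z_e - zNear) / (zFar - zNear) ), 1.0 );        // grayscale for debugging
}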

2.b Convert the screen coordinates to [tan a, tan b], where a is the horizontal angle and b the vertical one. There's probably a better spherical-coordinates term for this, but I don't know it yet.

2.c Build a 3D vector (converted screen coordinates, 1.0) and scale it by the linear depth. I assume this gives the estimated camera-space coordinates of the fragment. It looks like it does.
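
Taken together, steps 2.b and 2.c amount to a small helper like this (a sketch under my assumptions; tanHalfFov and aspect stand in for the hard-coded 0.5 and 1.6 in the code below, and uv is in [0,1]):

vec3 cameraSpacePos(vec2 uv, float linearDepth, float tanHalfFov, float aspect)
{
    vec2 ndc = uv * 2.0 - 1.0;                   // [0,1] -> [-1,1]
    vec2 tanAB = ndc * tanHalfFov;               // step 2.b: [tan a, tan b]
    tanAB.x *= aspect;                           // stretch horizontally by the aspect ratio
    return vec3( tanAB, 1.0 ) * linearDepth;     // step 2.c: scale the view ray by linear depth
}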

3.a Each difference is computed as: (depthm - sidedepth) / length(positionm - sideposition)
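
Or, as a sketch (illustrative names, not part of my shader):

float contourTerm(float depthm, vec3 posm, float sideDepth, vec3 sidePos)
{
    // step 3.a: depth difference normalized by the camera-space distance
    return (depthm - sideDepth) / length(posm - sidePos);
}
// Contour = 0.25 * ( four such terms, one per neighbour sample )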

And I may have screwed something up at any point along the way. The code looks fine to me, but the algorithm may not be, since I made it up myself.

My code:

uniform sampler2D depth_tex;

void main(){
    vec4 color_out;                 // NOTE: was undeclared in my original snippet
    vec2 distort = vec2(0.0);       // NOTE: also undeclared here; it comes from elsewhere in my shader, zeroed so this compiles standalone
    float znear = 1.0;
    float zfar = 10000000000.0;

    float depthm = texture2D(depth_tex, gl_TexCoord[0].xy + distort ).r;
    depthm = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depthm - 1.0 ) * (zfar - znear) );    // convert to linear
    vec2 scorm = (gl_TexCoord[0].xy + distort) - 0.5;    // conversion to the desired coordinate space; this gives values in (-0.5, 0.5)
    scorm = scorm * 2.0 * 0.5;     // normalize to (-1, 1), then multiply by tan(FOV/2); default FOV is IIRC 60 degrees (note tan(30 deg) is ~0.577, so 0.5 actually corresponds to a FOV of ~53 degrees)
    scorm.x = scorm.x * 1.6;      // 1.6 is the aspect ratio 16/10
    vec3 posm = vec3( scorm, 1.0 );
    posm = posm * depthm;      // scale by linearized depth

    float depthn = texture2D(depth_tex, gl_TexCoord[0].xy + distort + vec2( 0.002*0.625 , 0.0) ).r;    // 0.625 = 10/16 keeps the horizontal offset square on a 16:10 screen
    depthn = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depthn - 1.0 ) * (zfar - znear) );
    vec2 scorn = (gl_TexCoord[0].xy + distort + vec2( 0.002*0.625, 0.0) ) - 0.5;
    scorn = scorn * 2.0 * 0.5;
    scorn.x = scorn.x * 1.6;
    vec3 posn = vec3( scorn, 1.0 );
    posn = posn * depthn;

    float depths = texture2D(depth_tex, gl_TexCoord[0].xy + distort - vec2( 0.002*0.625 , 0.0) ).r;
    depths = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depths - 1.0 ) * (zfar - znear) );
    vec2 scors = (gl_TexCoord[0].xy + distort - vec2( 0.002*0.625, 0.0) ) - 0.5;
    scors = scors * 2.0 * 0.5;
    scors.x = scors.x * 1.6;
    vec3 poss = vec3( scors, 1.0 );
    poss = poss * depths;

    float depthw = texture2D(depth_tex, gl_TexCoord[0].xy + distort + vec2(0.0 , 0.002) ).r;
    depthw = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depthw - 1.0 ) * (zfar - znear) );
    vec2 scorw = ( gl_TexCoord[0].xy + distort + vec2( 0.0 , 0.002) ) - 0.5;
    scorw = scorw * 2.0 * 0.5;
    scorw.x = scorw.x * 1.6;
    vec3 posw = vec3( scorw, 1.0 );
    posw = posw * depthw;

    float depthe = texture2D(depth_tex, gl_TexCoord[0].xy + distort - vec2(0.0 , 0.002) ).r;
    depthe = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depthe - 1.0 ) * (zfar - znear) );
    vec2 score = ( gl_TexCoord[0].xy + distort - vec2( 0.0 , 0.002) ) - 0.5;
    score = score * 2.0 * 0.5;
    score.x = score.x * 1.6;
    vec3 pose = vec3( score, 1.0 );
    pose = pose * depthe;

    // step 3: sum the four normalized differences, then average
    float Contour = ( depthn - depthm )/length(posm - posn) + ( depths - depthm )/length(posm - poss) + ( depthw - depthm )/length(posm - posw) + ( depthe - depthm )/length(posm - pose);
    Contour = 0.25 * Contour;

    color_out.rgb = vec3( Contour, Contour, Contour );
    color_out.a = 1.0;
    gl_FragColor = color_out;
}

The exact problem with this second version is that it shows horrible artifacts at larger distances.

My goal is to get either of the two working properly. Are there any tricks I could use to improve the precision/quality of the linearized and non-linear depth buffer? Is there anything wrong with my depth-linearization algorithm?
