2

我目前在对使用行进四面体(Marching Tetrahedra)算法生成的网格进行纹理处理时遇到问题。该代码位于 .fx HLSL 文件中。问题可以这样描述:纹理似乎会随相机的位置移动。例如,相机左右平移(strafe)时,纹理也会随之左右移动;左右摇移(pan)相机时同样会出现此问题。

这是一种奇怪的效果,而且很难描述,所以我附上了一些图像,纹理 AddressU 和 AddressV 设置为 Clamp,这样更容易演示问题。

http://i.imgur.com/JbyVZ.png

http://i.imgur.com/nDkB1.png

如您所见,将相机向右移动也会将纹理向右移动。

我可能错过了一些完全明显的东西,比如乘以某个矩阵(我试过)。任何帮助将不胜感激。

这是我的像素着色器代码。

float4 DiffusePS( SurfaceVertex IN ) : SV_Target

{

float4 AmbientColor = float4(0.2, 0.2, 0.2, 1);
float AmbientIntensity = 0.2;

float4 Kd = 0.5;
float4 diffuseLight = 0.5;

float4 Pos = GetWorldSpacePos( IN.Pos );
float3 N = normalize(IN.N);
float3 L1 = normalize(LightPos1 - Pos.xyz);
float3 L2 = normalize(LightPos2 - Pos.xyz);
float3 L3 = normalize(LightPos3 - Pos.xyz);

float NdotL1 = max(0, dot(N, L1));
float NdotL2 = max(0, dot(N, L2));
float NdotL3 = max(0, dot(N, L3));

float3 I = normalize(Pos.xyz);
float3 V = normalize(-Pos.xyz);

float4 vDiff = diffuseLight * Kd * NdotL1;
float4 vDiff2 = diffuseLight * Kd * NdotL2;
float4 vDiff3 = diffuseLight * Kd * NdotL3;

float3 Color = vDiff + vDiff2 + vDiff3;
float4 derp = rockTexture.Sample(RockSampler, IN.tex.xy);

return lerp(derp ,float4(Color, 1), 0.5);

谢谢你的帮助

编辑: .fx 文件的其余部分

// Maximum number of metaballs summed per grid vertex in SampleFieldVS.
#define MAX_METABALLS   400
// Index of refraction used to precompute the Schlick R0 constants below.
#define IOR             2.5

#define PI 3.1415

// Field samples captured via stream-out in the multi-pass technique.
Buffer<float4> SampleDataBuffer;

// Output of SampleFieldVS / input to the marching-tetrahedra geometry shader:
// one grid vertex with the summed metaball field evaluated at it.
struct SampleData
{
    float4 Pos : SV_Position;
    float4 Field : TEXCOORD0;   // Gradient in .xyz, value in .w
};

// Vertex emitted by the GS for each extracted isosurface triangle.
struct SurfaceVertex
{
    float4 Pos : SV_Position;   // clip-space position (window-space by the time the PS sees it)
    float3 N : NORMAL;          // view-space normal (interpolated field gradient)
    float2 tex : TEXCOORD;      // texture coordinates generated in CalcIntersection
};

// Schlick Fresnel R0 reflectance terms, precomputed from IOR:
// R0 = ((1 - 1/IOR) / (1 + 1/IOR))^2, and R0Inv = 1 - R0.
cbuffer constants
{
    float R0Constant = ((1.0 - (1.0/IOR)) * (1.0 - (1.0/IOR))) / ((1.0 + (1.0/IOR)) * (1.0 + (1.0/IOR)));
    float R0Inv = 1.0 - ((1.0 - (1.0/IOR)) * (1.0 - (1.0/IOR)))/((1.0 + (1.0/IOR)) * (1.0 + (1.0/IOR)));
};


// Per-frame constants supplied by the application.
cbuffer cb0 : register(b0)
{
    // Inverse projection matrix (clip -> view), used by GetWorldSpacePos.
    row_major float4x4 ProjInv;
    // Inverse-transpose of the view matrix, used to transform normals/directions.
    row_major float3x3 ViewIT;
    row_major float4x4 WorldViewProj;
    row_major float4x4 World;

    uint NumMetaballs;
    float4 Metaballs[MAX_METABALLS];    // .xyz -> metaball center, .w -> metaball squared radius

    // Viewport origin and reciprocal size, for the window -> NDC conversion.
    float3 ViewportOrg;
    float3 ViewportSizeInv;

    float3 LightPos1;        // view-space light position 1
    float3 LightPos2;        // view-space light position 2
    float3 LightPos3;        // view-space light position 3


};

// Rock texture sampled in DiffusePS.
Texture2D rockTexture;

// Trilinear sampler with wrapping addressing for the rock texture.
SamplerState RockSampler
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Wrap;
    AddressV = Wrap;
};



// Reconstructs the fragment position from its window-space SV_Position by
// undoing the viewport transform and the projection.
// NOTE: despite the name, the value produced is a VIEW-space position
// (the comment on the ProjInv multiply below says as much).
float4 GetWorldSpacePos( float4 WindowPos )
{
    // Window coordinates -> normalized device coordinates (Y is flipped).
    float4 ndc;
    ndc.x = 2 * ((WindowPos.x - ViewportOrg.x) * ViewportSizeInv.x) - 1;
    ndc.y = -2 * ((WindowPos.y - ViewportOrg.y) * ViewportSizeInv.y) + 1;
    ndc.z = (WindowPos.z - ViewportOrg.z) * ViewportSizeInv.z;
    ndc.w = 1;

    // Backtransform clipspace position to get viewspace position,
    // then re-normalize by the homogeneous w.
    float4 viewPos = mul(ndc, ProjInv);
    viewPos.xyz /= viewPos.w;

    return viewPos;
}


// Metaball function
// Returns metaball function value in .w and its gradient in .xyz
float4 Metaball(float3 Pos, float3 Center, float RadiusSq)
{
    // Offset from the metaball center and reciprocal squared distance.
    float3 offset = Pos - Center;
    float invDistSq = 1 / dot(offset, offset);

    float4 result;
    // Field value: RadiusSq / DistSq.
    result.w = RadiusSq * invDistSq;
    // Analytic gradient of the field value with respect to Pos.
    result.xyz = -2 * RadiusSq * invDistSq * invDistSq * offset;

    return result;
}


// Trilinear sampler used for the environment cube map lookups.
SamplerState TriLinearSampler
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = WRAP;
    AddressV = WRAP;
};


// Vertex shader calculates field contributions at each grid vertex
SampleData SampleFieldVS(float3 Pos : POSITION)
{
    SampleData o;

    // Evaluate the field at the world-space location of this grid vertex.
    float3 WorldPos = mul(float4(Pos, 1), World).xyz;

    // Sum up contributions (value in .w, gradient in .xyz) from all metaballs.
    float4 field = 0;
    for (uint i = 0; i < NumMetaballs; ++i)
    {
        field += Metaball(WorldPos, Metaballs[i].xyz, Metaballs[i].w);
    }
    o.Field = field;

    // Transform the position to clip space and turn the accumulated gradient
    // into a unit view-space normal (negated: the gradient points outward).
    o.Pos = mul(float4(Pos.xyz, 1), WorldViewProj);
    o.Field.xyz = -normalize(mul(o.Field.xyz, ViewIT));

    return o;
}



// Identity vertex shader: feeds previously streamed-out samples back into
// the geometry shader in the multi-pass technique.
SampleData PassThroughVS(SampleData IN)
{
    return IN;
}

// Estimate where isosurface intersects grid edge with endpoints v0, v1
SurfaceVertex CalcIntersection(SampleData v0, SampleData v1)
{
    SurfaceVertex o;

    // We're taking special care to generate bit-exact results regardless of traversal (v0,v1) or (v1, v0):
    // the symmetric midpoint form 0.5*(t*(b-a) + (b+a)) evaluates identically either way.
    float t = (2.0 - (v0.Field.w + v1.Field.w)) / (v1.Field.w - v0.Field.w);

    o.Pos = 0.5 * (t * (v1.Pos - v0.Pos) + (v1.Pos + v0.Pos));
    o.N = 0.5 * (t * (v1.Field.xyz - v0.Field.xyz) + (v1.Field.xyz + v0.Field.xyz));    

    // FIX: the rest of this file uses the row-vector convention mul(vector, matrix)
    // with row_major matrices (see SampleFieldVS); mul(World, o.Pos) multiplied by
    // the transpose instead.
    // NOTE(review): o.Pos is already a clip-space position at this point, so texture
    // coordinates derived from it still vary with the camera — for camera-stable
    // texcoords, interpolate an object/world-space position carried through
    // SampleData instead. TODO confirm against the intended mapping.
    float4 worldPos = mul(o.Pos, World);
    o.tex = worldPos.xy;

    return o;
}

// This struct stores vertex indices of up to 4 edges from the input tetrahedron. The GS code below 
// uses these indices to index into the input vertex set for interpolation along those edges. 
// It basically encodes topology for the output triangle strip (of up to 2 triangles).
struct TetrahedronIndices 
{ 
    uint4 e0;   // endpoint index pairs for the first two crossed edges
    uint4 e1;   // endpoint pairs for the third (and optional fourth) crossed edge
};

[MaxVertexCount(4)]
void TessellateTetrahedraGS(lineadj SampleData In[4], inout TriangleStream<SurfaceVertex> Stream)
{
    // construct index for this tetrahedron: classify each corner as inside
    // (Field.w > 1) or outside and pack the 4 bits into a 0..15 case index
    uint index = (uint(In[0].Field.w > 1) << 3) | (uint(In[1].Field.w > 1) << 2) | (uint(In[2].Field.w > 1) << 1) | uint(In[3].Field.w > 1);


    // don't bother if all vertices out or all vertices in
    if (index > 0 && index < 15)
    {
        // EdgeTableGS maps the case index to the crossed tetrahedron edges.
        // NOTE(review): EdgeTableGS is not declared in this file excerpt —
        // presumably defined elsewhere in the project; verify.
        uint4 e0 = EdgeTableGS[index].e0;
        uint4 e1 = EdgeTableGS[index].e1;

        // Emit a triangle
        Stream.Append( CalcIntersection(In[e0.x], In[e0.y]) );
        Stream.Append( CalcIntersection(In[e0.z], In[e0.w]) );
        Stream.Append( CalcIntersection(In[e1.x], In[e1.y]) );

        // Emit additional triangle, if necessary (forming a strip of 2 triangles)
        if (e1.z != 0) {
            Stream.Append( CalcIntersection(In[e1.z], In[e1.w]) );
        }

    }
}

// Environment cube map used for reflections in ShadeSurfacePS.
TextureCube EnvMap;

// Schlick's approximation of the Fresnel reflectance term, built from the
// R0 constants precomputed in the 'constants' cbuffer.
float FresnelApprox(float3 I, float3 N)
{
    float edgeFalloff = pow(1.0 - dot(I, N), 5.0);
    return R0Constant + R0Inv * edgeFalloff;
}

// Reflective/lit surface shader: blends an environment reflection with
// three-light diffuse + specular shading, weighted by a Fresnel term.
float4 ShadeSurfacePS( SurfaceVertex IN ) : SV_Target
{
    // Reconstruct the view-space position from the window-space SV_Position.
    float4 Pos = GetWorldSpacePos( IN.Pos );

    float3 N = normalize(IN.N);
    // Directions to the three view-space lights.
    float3 L1 = normalize(LightPos1 - Pos.xyz);
    float3 L2 = normalize(LightPos2 - Pos.xyz);
    float3 L3 = normalize(LightPos3 - Pos.xyz);
    // View ray (the camera sits at the view-space origin).
    float3 I = normalize(Pos.xyz);

    float3 R = reflect(I, N);

    // Environment lookup along the reflected ray.
    // NOTE(review): elsewhere directions use mul(v, ViewIT); mul(ViewIT, R) applies
    // the transpose — possibly intentional to rotate back out of view space; verify.
    float4 Reflected = EnvMap.Sample( TriLinearSampler, mul(ViewIT, R ) );

    float NdotL1 = max(0, dot(N, L1));
    float NdotL2 = max(0, dot(N, L2));
    float NdotL3 = max(0, dot(N, L3));

    // Per-light diffuse tint plus a shared exponent-32 specular lobe.
    float3 Color = NdotL1 * float3(1, 1, 1) + pow(max(dot(R, L1), 0), 32)
                    + NdotL2 * float3(0.65, 0.6, 0.45) + pow(max(dot(R, L2), 0), 32)
                    + NdotL3 * float3(0.7, 0.7, 0.8) + pow(max(dot(R, L3), 0), 32);

    // FIX: reuse the sample taken above — the original fetched the cube map a
    // second time with identical coordinates in this return statement.
    return lerp(Reflected, float4(Color, 1), FresnelApprox(I, N) * 0.05 );

}

// Flat-color pixel shader; 'color' is bound as a uniform literal per pass
// (used by the wireframe technique).
float4 SimplePS( SurfaceVertex IN, uniform float4 color ) : SV_Target
{
    return color;
}

// Textured diffuse shader: three-light Lambert term blended 50/50 with the
// rock texture. This is the shader discussed in the question above.
float4 DiffusePS( SurfaceVertex IN ) : SV_Target
{

    // Ambient constants (only referenced by the commented-out variant below).
    float4 AmbientColor = float4(0.2, 0.2, 0.2, 1);
    float AmbientIntensity = 0.2;

    // Diffuse reflectance and light intensity. Scalars instead of float4:
    // the products below feed float3 values, so this removes the implicit
    // float4 -> float3 truncation warnings without changing the result.
    float Kd = 0.5;
    float diffuseLight = 0.5;

    // Reconstruct the view-space position from the window-space SV_Position.
    float4 Pos = GetWorldSpacePos( IN.Pos );
    float3 N = normalize(IN.N);
    float3 L1 = normalize(LightPos1 - Pos.xyz);
    float3 L2 = normalize(LightPos2 - Pos.xyz);
    float3 L3 = normalize(LightPos3 - Pos.xyz);

    float NdotL1 = max(0, dot(N, L1));
    float NdotL2 = max(0, dot(N, L2));
    float NdotL3 = max(0, dot(N, L3));

    // (Removed unused locals I and V from the original.)

    float3 vDiff = diffuseLight * Kd * NdotL1;
    float3 vDiff2 = diffuseLight * Kd * NdotL2;
    float3 vDiff3 = diffuseLight * Kd * NdotL3;

    float3 Color = vDiff + vDiff2 + vDiff3;
    // Texture coordinates come from CalcIntersection (see the answer below:
    // they are derived from a camera-dependent position, which is the bug).
    float4 derp = rockTexture.Sample(RockSampler, IN.tex.xy);

    return lerp(derp ,float4(Color, 1), 0.5);
    //return lerp(NoiseTexture.Sample( NoiseSampler, IN.tex ), float4(Color, 1), FresnelApprox(V, N) * 0.05 );

    //return saturate(vDiff+vDiff2+vDiff3 + AmbientColor * AmbientIntensity);


}

// Depth test + depth write enabled; shared by all rendering passes.
DepthStencilState EnableDepthDSS
{
    DepthEnable = true;
    DepthWriteMask = 1;
};

// Wireframe rasterizer for the debug view of the extracted mesh.
RasterizerState WireFrameRS
{
    MultiSampleEnable = True;
    CullMode = None;
    FillMode = WireFrame;
};

// Solid-fill rasterizer with culling disabled.
RasterizerState SolidRS
{
    MultiSampleEnable = True;
    CullMode = None;
    FillMode = Solid;
};


// Debug technique: extract the isosurface in one GS pass and draw it as a
// flat-gray wireframe.
technique10 MarchingTetrahedraWireFrame
{
    pass P0
    {
        SetRasterizerState( WireFrameRS );
        SetDepthStencilState( EnableDepthDSS, 0 );

        SetVertexShader( CompileShader( vs_4_0, SampleFieldVS() ) );
        SetGeometryShader( CompileShader( gs_4_0, TessellateTetrahedraGS() ) );
        SetPixelShader( CompileShader( ps_4_0, SimplePS( float4( 0.7, 0.7, 0.7, 1 ) ) ) );
    }
}

// Tessellate isosurface in a single pass
// Tessellate isosurface in a single pass and shade it with DiffusePS.
technique10 MarchingTetrahedraSinglePassGS
{
    pass P0
    {
        SetRasterizerState( SolidRS );
        SetDepthStencilState( EnableDepthDSS, 0 );

        SetVertexShader( CompileShader( vs_4_0, SampleFieldVS() ) );
        SetGeometryShader( CompileShader( gs_4_0, TessellateTetrahedraGS() ) );
        SetPixelShader( CompileShader( ps_4_0, DiffusePS() ) );
    }
}

// Tessellate isosurface in two passes, streaming out VS results in-between
// Tessellate isosurface in two passes, streaming out VS results in-between:
// the stream-out declaration captures SV_Position and the field (TEXCOORD0).
GeometryShader StreamOutGS = ConstructGSWithSO( CompileShader( vs_4_0, PassThroughVS() ), "SV_Position.xyzw; TEXCOORD0.xyzw" );

technique10 MarchingTetrahedraMultiPassGS
{
    // Pass 0: evaluate the field per grid vertex and stream the samples out
    // (no rasterization — pixel shader is NULL).
    pass P0
    {
        SetVertexShader( CompileShader( vs_4_0, SampleFieldVS() ) );
        SetGeometryShader( StreamOutGS );
        SetPixelShader( NULL );
    }

    // Pass 1: feed the streamed samples back through the tessellation GS and shade.
    pass P1
    {

        SetRasterizerState( SolidRS );
        SetDepthStencilState( EnableDepthDSS, 0 );

        SetVertexShader( CompileShader ( vs_4_0, PassThroughVS() ) );
        SetGeometryShader( CompileShader( gs_4_0, TessellateTetrahedraGS() ) );
        SetPixelShader( CompileShader( ps_4_0, DiffusePS() ) );
    }
}
4

1 回答 1

0

纹理坐标当前是通过将顶点位置乘以世界矩阵来生成的:

CalcIntersection()
....
    float4 worldPos = mul(World, o.Pos);
    o.tex = worldPos.xy;
....

因此,相机平移必然改变了这两个输入(位置或世界矩阵)之一。我猜测问题出在世界矩阵上(也就是说,相机平移实际上是通过移动物体来实现的),可以尝试改用不依赖世界矩阵的纹理坐标生成方式。例如:

CalcIntersection()
...
    o.tex = o.Pos.xy;
...


编辑:既然那样不起作用,那么一定是位置本身被相机平移修改了。查看 SampleFieldVS(),其中有一行代码似乎正是如此:

o.Pos = mul(float4(Pos.xyz, 1), WorldViewProj);

因此,您需要做的是在修改之前将位置保存在那里。您必须将其保存在 SampleData 中,因此将其添加到 SampleData 结构的末尾:

float2 tex1 : TEXCOORD1;

然后在 SampleFieldVS() 末尾添加保存行

o.tex1 = mul(float4(Pos.xyz, 0), World).xy;

最后删除 ' float4 worldPos = mul(World, o.Pos); o.tex = worldPos.xy; ' 在 CalcIntersection() 的末尾并替换为:

o.tex = 0.5 * (t * (v1.tex1 - v0.tex1) + (v1.tex1 + v0.tex1));
于 2012-04-18T21:03:23.447 回答