I can't get correct screen coordinates, so I made this small MWE to reproduce the behaviour I am seeing in my main project. I am using single-pass rendering in Unity 3D with a Windows Mixed Reality headset. I have a shader that is assigned to a new material, "ppMaterial", which is then used in
void OnRenderImage(RenderTexture source, RenderTexture destination)
{
    Graphics.Blit(source, destination, ppMaterial);
}
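For completeness, this is roughly how the effect is hooked up in the MWE (the class name is arbitrary and ppMaterial is just assigned in the Inspector; nothing here should matter for the problem):

using UnityEngine;

// Minimal sketch of the post-processing component; class name is arbitrary,
// ppMaterial is assigned in the Inspector from the shader below.
[RequireComponent(typeof(Camera))]
public class ScreenCirclesEffect : MonoBehaviour
{
    public Material ppMaterial;  // material created from Custom/ScreenShader

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        // Run the screen-space shader over the rendered camera image.
        Graphics.Blit(source, destination, ppMaterial);
    }
}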
In this MWE the shader is just supposed to draw circles at the centre and edges of the screen, so I made this simple shader:
Shader "Custom/ScreenShader" {
Properties {
}
SubShader {
Pass {
CGPROGRAM
#pragma fragment frag
#pragma vertex vert
#include "UnityCG.cginc"
uniform sampler2D _MainTex;
half4 _MainTex_ST;
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float4 screenPos : TEXCOORD1;
};
float getDist(float2 isIn, float2 pos2){
return sqrt((isIn.x - pos2.x)*(isIn.x - pos2.x) + (isIn.y - pos2.y)*(isIn.y - pos2.y));
}
float4 NearBoundary(float2 isIn, v2f i)
{
float2 pos2 = float2(0.5,0.5);
float2 pos3 = float2(0,0.5);
float2 pos4 = float2(0.5,0.0);
float dist2 = getDist(isIn, pos2);
float dist3 = getDist(isIn, pos3);
float dist4 = getDist(isIn, pos4);
if(dist2<0.1){
return float4(0,0.5,0.5,1);
}
if(dist3.x<0.3){
return float4(0.5,0,0.5,1);
}
if(dist4.x<0.3){
return float4(0.5,0,0.5,1);
}
return tex2D(_MainTex, UnityStereoScreenSpaceUVAdjust(i.uv, _MainTex_ST));
}
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.screenPos = ComputeScreenPos(o.vertex);
o.uv = v.uv;
return o;
}
half4 frag(v2f i) : SV_Target {
float2 screenPos = i.screenPos;
return NearBoundary(screenPos, i);
}
ENDCG
}
}
}
But this does not work: the dots are not drawn at the centre and at the edges where they should be. How do I get correct screen coordinates in the (0, 1) range for each eye?
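To make explicit what I mean by "screen coordinates": in a normal, non-stereo setup I would expect to get 0-1 viewport coordinates from ComputeScreenPos with the usual perspective divide, roughly like the sketch below. Whether this still gives per-eye 0-1 coordinates under single-pass stereo (or whether the values end up in the double-wide texture's range instead) is exactly what I am unsure about.

half4 frag(v2f i) : SV_Target
{
    // Standard non-stereo usage of ComputeScreenPos: divide by w to get
    // 0-1 viewport UVs. Under single-pass stereo this presumably needs an
    // additional per-eye adjustment (e.g. based on unity_StereoEyeIndex),
    // which is what I am asking about.
    float2 screenUV = i.screenPos.xy / i.screenPos.w;
    return NearBoundary(screenUV, i);
}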