I am trying to create a shader that distorts and mirrors a video stream the way it is seen through the bifocal lenses of a VR One.
The VR One ships with a Unity package, in which I found the lookup tables (LUTs) that specify the distortion transformation.
We have a set of 6 LUTs, two for each color:
LUT_XB, LUT_YB, LUT_XG, LUT_YG, LUT_XR and LUT_YR. The result is shown here.
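For intuition, each LUT texel packs one source-coordinate component as a fixed-point value across its R, G and B bytes (R being the most significant byte). A minimal sketch of the decode, using the same weights as the Zeiss code below:

// Unpack one [0,1] coordinate component from an 8-bit RGB texel:
// value = r + g/255 + b/65025, with R the most significant byte.
float DecodeFloatRGB(vec3 rgb) {
    return dot(rgb, vec3(1.0, 1.0/255.0, 1.0/65025.0));
}
// Example: a texel (127, 191, 0)/255 decodes to
// 0.49804 + 0.00294 + 0.0 ≈ 0.50098, i.e. just right of center.

So the pair (LUT_XR, LUT_YR) maps every output pixel to the source position the red channel should be fetched from, and likewise for green and blue.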
The code provided by Zeiss:
//
// Copyright (C) 2014 - Carl Zeiss AG
//
// Distortion Shader
Shader "VROneSDK/LUTDistortion" {
Properties {
_MainTex ("Base (RGB)", 2D) = "white" {}
// We have a set of LUTs, two for each color:
// one for the x-direction and one for the
// y-direction: _LUT<direction>Tex<Color>.
_LUTXTexR ("LUTXR", 2D) = "white" {}
_LUTYTexR ("LUTYR", 2D) = "white" {}
_LUTXTexG ("LUTXG", 2D) = "white" {}
_LUTYTexG ("LUTYG", 2D) = "white" {}
_LUTXTexB ("LUTXB", 2D) = "white" {}
_LUTYTexB ("LUTYB", 2D) = "white" {}
}
SubShader {
Pass {
CGPROGRAM
// We need to define a vertex shader even though
// we don't use one. Without it, this is an
// invalid shader for GLES.
#pragma target 3.0
#pragma vertex vert_img
// The fragment shader is the function frag.
#pragma fragment frag
// Use Unity's standard shader includes.
#include "UnityCG.cginc"
// We use several variables: _MainTex contains
// the source texture.
uniform sampler2D _MainTex;
// One Texture for each LUT (direction and color).
uniform sampler2D _LUTXTexR;
uniform sampler2D _LUTYTexR;
uniform sampler2D _LUTXTexG;
uniform sampler2D _LUTYTexG;
uniform sampler2D _LUTXTexB;
uniform sampler2D _LUTYTexB;
// mirror flag for right eye (textures are for left
// eye and mirrored for right eye)
uniform bool _isMirrored;
// Decoding a color value from the
// texture into a float. Similar to Unity's
// DecodeFloatRGBA and DecodeFloatRG.
float DecodeFloatRGB(float3 rgb) {
return dot(rgb, float3(1.0,1.0/255.0,1.0/65025.0));
}
float mirrored(float coord) {
if (_isMirrored) {
return 1.0f - coord;
}
return coord;
}
// Compute the new lookup position from a LUT.
// For each color we extract the rgb value at the
// coordinate we are interested in. This rgb value
// indicates which pixel (or interpolated pixel) we
// need to map.
float2 LUTDistortionR(float2 coord)
{
float3 lookupX = tex2D(_LUTXTexR, coord).rgb;
float3 lookupY = tex2D(_LUTYTexR, coord).rgb;
return float2(mirrored(DecodeFloatRGB(lookupX)),DecodeFloatRGB(lookupY));
}
float2 LUTDistortionG(float2 coord)
{
float3 lookupX = tex2D(_LUTXTexG, coord).rgb;
float3 lookupY = tex2D(_LUTYTexG, coord).rgb;
return float2(mirrored(DecodeFloatRGB(lookupX)),DecodeFloatRGB(lookupY));
}
float2 LUTDistortionB(float2 coord)
{
float3 lookupX = tex2D(_LUTXTexB, coord).rgb;
float3 lookupY = tex2D(_LUTYTexB, coord).rgb;
return float2(mirrored(DecodeFloatRGB(lookupX)),DecodeFloatRGB(lookupY));
}
float4 frag(v2f_img i) : COLOR
{
// our result will be initialized to 0/0/0.
float3 res = float3(0.0f,0.0f,0.0f);
// Get the target (u,v) coordinate (i.uv)
// which is where we will draw the pixel.
// What we will draw depends on the color
// and the distortion, which we can look up in
// the LUT. We do this for each color and do
// not put xy in rb or similar to allow us to
// improve precision with the DecodeFloatRGB method,
// as can be seen above.
// since textures are for left eye only, we need to
// "mirror" the input coordinate for the right eye.
float2 coord = float2(mirrored(i.uv.x), i.uv.y);
float2 xyR = LUTDistortionR(coord);
if (xyR.x <= 0.0f || xyR.y <= 0.0f || xyR.x >= 1.0f || xyR.y >= 1.0f) {
// set alpha to 1 and return.
return float4(res, 1.0f);
}
float2 xyG = LUTDistortionG(coord);
if (xyG.x <= 0.0f || xyG.y <= 0.0f || xyG.x >= 1.0f || xyG.y >= 1.0f) {
// set alpha to 1 and return.
return float4(res, 1.0f);
}
float2 xyB = LUTDistortionB(coord);
if (xyB.x <= 0.0f || xyB.y <= 0.0f || xyB.x >= 1.0f || xyB.y >= 1.0f) {
// set alpha to 1 and return.
return float4(res, 1.0f);
}
res = float3(tex2D(_MainTex,xyR).r,
tex2D(_MainTex,xyG).g,
tex2D(_MainTex,xyB).b);
// set alpha to 1 and return.
return float4(res, 1.0f);
}
ENDCG
}
}
}
I want to create a fragment shader that transforms a YUV frame stream the same way (I created one sampler2D for each of y, u and v: sampler2D s_texture_y ...).
The code I implemented:
NSString *const vertexShaderString = SHADER_STRING
(
attribute vec4 position;
attribute vec2 texcoord;
uniform mat4 modelViewProjectionMatrix;
varying vec2 v_texcoord;
void main()
{
gl_Position = modelViewProjectionMatrix * position;
v_texcoord = texcoord.xy;
}
);
NSString *const distortionVROneShaderString = SHADER_STRING
(
precision highp float;
varying highp vec2 v_texcoord;
uniform sampler2D s_texture_y;
uniform sampler2D s_texture_u;
uniform sampler2D s_texture_v;
uniform sampler2D LUTXTexR;
uniform sampler2D LUTYTexR;
uniform sampler2D LUTXTexG;
uniform sampler2D LUTYTexG;
uniform sampler2D LUTXTexB;
uniform sampler2D LUTYTexB;
float DecodeFloatRGB(vec3 rgb) {
return dot(rgb, vec3(1.0,1.0/255.0,1.0/65025.0));
}
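// (Could the problem be how the LUTs are uploaded? DecodeFloatRGB
// assumes exact byte values from the LUT images, so the textures
// presumably must not be filtered, mipmapped or compressed --
// bilinear blending of the packed bytes would scramble the
// decoded coordinate.)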
vec2 LUTDistortionR(vec2 coord)
{
vec3 lookupX = texture2D(LUTXTexR, coord).rgb;
vec3 lookupY = texture2D(LUTYTexR, coord).rgb;
return vec2(DecodeFloatRGB(lookupX),DecodeFloatRGB(lookupY));
}
vec2 LUTDistortionG(vec2 coord)
{
vec3 lookupX = texture2D(LUTXTexG, coord).rgb;
vec3 lookupY = texture2D(LUTYTexG, coord).rgb;
return vec2(DecodeFloatRGB(lookupX),DecodeFloatRGB(lookupY));
}
vec2 LUTDistortionB(vec2 coord)
{
vec3 lookupX = texture2D(LUTXTexB, coord).rgb;
vec3 lookupY = texture2D(LUTYTexB, coord).rgb;
return vec2(DecodeFloatRGB(lookupX),DecodeFloatRGB(lookupY));
}
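// NB: unlike the Zeiss original, these lookups apply no mirroring
// for the right eye -- the _isMirrored handling was dropped.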
void main()
{
highp float y = texture2D(s_texture_y, v_texcoord).r;
highp float u = texture2D(s_texture_u, v_texcoord).r - 0.5;
highp float v = texture2D(s_texture_v, v_texcoord).r - 0.5;
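// Standard full-range BT.601 YUV -> RGB conversion.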
highp float r = y + 1.402 * v;
highp float g = y - 0.344 * u - 0.714 * v;
highp float b = y + 1.772 * u;
vec3 textureRGB = vec3(r,g,b);
vec3 res = vec3(0.0,0.0,0.0);
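// NB: textureRGB and res end up unused below.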
vec2 xyR = LUTDistortionR(v_texcoord);
vec2 xyG = LUTDistortionG(v_texcoord);
vec2 xyB = LUTDistortionB(v_texcoord);
gl_FragColor = vec4(texture2D(s_texture_y, xyR).r,texture2D(s_texture_u,xyG).r,texture2D(s_texture_v, xyB).r,1.0);// vec4(res,1.0);
}
);
The result I get with this code does not seem to distort the video stream at all, and I suspect that importing the LUTs as LUTXTexR, LUTXTexG, LUTXTexB ... should instead be done as a bufferSample.
I'm stuck; any help, pointers or tutorial links would be welcome, as I could not find a tutorial for this particular application.
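Looking at my port again, two things stand out: textureRGB is computed but never used, and in the final line I sample the raw y/u/v planes at the per-color distorted coordinates, which mixes up color spaces (the LUTs describe where to fetch R, G and B, not Y, U and V). For reference, this is the direction I have been sketching (untested; isMirrored is a uniform I would add and set for the right eye, and the LUT textures are assumed to be uploaded unfiltered, without mipmaps or compression):

precision highp float;
varying highp vec2 v_texcoord;
uniform sampler2D s_texture_y;
uniform sampler2D s_texture_u;
uniform sampler2D s_texture_v;
uniform sampler2D LUTXTexR;
uniform sampler2D LUTYTexR;
uniform sampler2D LUTXTexG;
uniform sampler2D LUTYTexG;
uniform sampler2D LUTXTexB;
uniform sampler2D LUTYTexB;
uniform bool isMirrored; // hypothetical: true for the right eye

float DecodeFloatRGB(vec3 rgb) {
    return dot(rgb, vec3(1.0, 1.0/255.0, 1.0/65025.0));
}
float mirrored(float coord) {
    return isMirrored ? 1.0 - coord : coord;
}
// One lookup per color; as in the Zeiss shader, the decoded
// x coordinate is mirrored as well.
vec2 LUTDistortion(sampler2D lutX, sampler2D lutY, vec2 coord) {
    return vec2(mirrored(DecodeFloatRGB(texture2D(lutX, coord).rgb)),
                DecodeFloatRGB(texture2D(lutY, coord).rgb));
}
bool outOfRange(vec2 xy) {
    return xy.x <= 0.0 || xy.y <= 0.0 || xy.x >= 1.0 || xy.y >= 1.0;
}
// Full-range BT.601 YUV -> RGB at one source coordinate; each call
// below keeps only one channel, mirroring the per-color fetches of
// the original RGB shader.
vec3 yuvToRGB(vec2 coord) {
    float y = texture2D(s_texture_y, coord).r;
    float u = texture2D(s_texture_u, coord).r - 0.5;
    float v = texture2D(s_texture_v, coord).r - 0.5;
    return vec3(y + 1.402 * v,
                y - 0.344 * u - 0.714 * v,
                y + 1.772 * u);
}
void main()
{
    // Mirror the input coordinate for the right eye, since the
    // LUTs are authored for the left eye only.
    vec2 coord = vec2(mirrored(v_texcoord.x), v_texcoord.y);
    vec2 xyR = LUTDistortion(LUTXTexR, LUTYTexR, coord);
    vec2 xyG = LUTDistortion(LUTXTexG, LUTYTexG, coord);
    vec2 xyB = LUTDistortion(LUTXTexB, LUTYTexB, coord);
    if (outOfRange(xyR) || outOfRange(xyG) || outOfRange(xyB)) {
        gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0); // black outside the lens
        return;
    }
    gl_FragColor = vec4(yuvToRGB(xyR).r,
                        yuvToRGB(xyG).g,
                        yuvToRGB(xyB).b,
                        1.0);
}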