This is my first time working with vertex shaders in a WebGL context. I want to drive a primitive with a video texture, but rather than simply mapping the video onto a surface, I am trying to translate the video's luminance into vertex displacement. Something like a Rutt/Etra video synthesizer, but in digital form. Bright pixels should push vertices forward, while darker pixels should do the opposite. Can anyone tell me what I am doing wrong? I can't find any reference for this error.

When my code compiles, I get the following wherever I use sampler2D and texture2D:

Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.65 Safari/537.36 | WebGL 1.0 (OpenGL ES 2.0 Chromium) | WebKit | WebKit WebGL | WebGL GLSL ES 1.0 (OpenGL ES GLSL ES 1.0 Chromium) Three.js:264
ERROR: 0:57: 'ftransform' : no matching overloaded function found
ERROR: 0:57: 'assign' : cannot convert from 'const mediump float' to 'Position highp 4-component vector of float'
ERROR: 0:60: 'gl_TextureMatrix' : undeclared identifier
ERROR: 0:60: 'gl_TextureMatrix' : left of '[' is not of type array, matrix or vector
ERROR: 0:60: 'gl_MultiTexCoord0' : undeclared identifier Three.js:257

 <!doctype html>
<html>
    <head>
        <title>boiler plate for three.js</title>
        <meta charset="utf-8">
        <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">

        <script src="vendor/three.js/Three.js"></script>
        <script src="vendor/three.js/Detector.js"></script>
        <script src="vendor/three.js/Stats.js"></script>
        <script src="vendor/threex/THREEx.screenshot.js"></script>
        <script src="vendor/threex/THREEx.FullScreen.js"></script>
        <script src="vendor/threex/THREEx.WindowResize.js"></script>
        <script src="vendor/threex.dragpancontrols.js"></script>
        <script src="vendor/headtrackr.js"></script>

        <style>
body {
    overflow    : hidden;
    padding     : 0;
    margin      : 0;
    color       : #222;
    background-color: #BBB;
    font-family : arial;
    font-size   : 100%;
}
#info .top {
    position    : absolute;
    top     : 0px;
    width       : 100%;
    padding     : 5px;
    text-align  : center;
}
#info a {
    color       : #66F;
    text-decoration : none;
}
#info a:hover {
    text-decoration : underline;
}
#info .bottom {
    position    : absolute;
    bottom      : 0px;
    right       : 5px;
    padding     : 5px;
}

        </style>
    </head>
<body>
    <!-- three.js container -->
        <div id="container"></div>
    <!-- info on screen display -->
    <div id="info">
        <!--<div class="top">
            <a href="http://learningthreejs.com/blog/2011/12/20/boilerplate-for-three-js/" target="_blank">LearningThree.js</a>
            boiler plate for
            <a href="https://github.com/mrdoob/three.js/" target="_blank">three.js</a>
        </div>-->
        <div class="bottom" id="inlineDoc" >
            - <i>p</i> for screenshot
        </div> 
    </div> 

<canvas id="compare" width="320" height="240" style="display:none"></canvas>
<video id="vid" autoplay loop></video>
<script type="x-shader/x-vertex" id="vertexShader">
varying vec2 texcoord0;


void main()
{
    // perform standard transform on vertex
    gl_Position = ftransform();

    // transform texcoords
    texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0);
}       
    </script>

    <script type="x-shader/x-vertex" id="fragmentShader">
varying vec2 texcoord0;

uniform sampler2D tex0;
uniform vec2 imageSize;
uniform float coef;

const vec4 lumcoeff = vec4(0.299,0.587,0.114,0.);

void main (void)
{

    vec4 pixel = texture2D(tex0, texcoord0);
    float luma = dot(lumcoeff, pixel);

    gl_FragColor =  vec4((texcoord0.x  / imageSize.x), luma, (texcoord0.y / imageSize.y) , 1.0);
}
    </script>
    <script type="text/javascript">
        var stats, scene, renderer;
        var camera, cameraControls;
        var videoInput = document.getElementById('vid');
        var canvasInput = document.getElementById('compare');   
        var projector = new THREE.Projector();
        var gl;
        var mesh,
            cube,
            attributes,
            uniforms,
            material,
            materials;
        var videoTexture = new THREE.Texture( videoInput );

        if( !init() )   animate();

        // init the scene
        function init(){

            if( Detector.webgl ){
                renderer = new THREE.WebGLRenderer({
                    antialias       : true, // to get smoother output
                    preserveDrawingBuffer   : true  // to allow screenshot
                });
                renderer.setClearColorHex( 0xBBBBBB, 1 );
            // uncomment if webgl is required
            //}else{
            //  Detector.addGetWebGLMessage();
            //  return true;
            }else{
                renderer    = new THREE.CanvasRenderer();
                gl=renderer;
            }
            renderer.setSize( window.innerWidth, window.innerHeight );
            document.getElementById('container').appendChild(renderer.domElement);


            // create a scene
            scene = new THREE.Scene();

            // put a camera in the scene
            camera = new THREE.PerspectiveCamera( 23, window.innerWidth / window.innerHeight, 1, 100000 );
            camera.position.z = 0;
            scene.add( camera );
//
//          // create a camera contol
//          cameraControls  = new THREEx.DragPanControls(camera)

            // transparently support window resize
//          THREEx.WindowResize.bind(renderer, camera);
            // allow 'p' to make screenshot
            THREEx.Screenshot.bindKey(renderer);
            // allow 'f' to go fullscreen where this feature is supported
            if( THREEx.FullScreen.available() ){
                THREEx.FullScreen.bindKey();        
                document.getElementById('inlineDoc').innerHTML  += "- <i>f</i> for fullscreen";
            }
            materials   = new THREE.MeshLambertMaterial({
                    map : videoTexture
            });
            attributes = {};

            uniforms = {

              tex0: {type: 'mat2', value: materials},

              imageSize: {type: 'f', value: []},

              coef: {type: 'f', value: 1.0}

            };


        //Adding a directional light source to see anything..
        var directionalLight = new THREE.DirectionalLight(0xffffff);
        directionalLight.position.set(1, 1, 1).normalize();
        scene.add(directionalLight);    



            // video styling
            videoInput.style.position = 'absolute';
            videoInput.style.top = '50px';
            videoInput.style.zIndex = '100001';
            videoInput.style.display = 'block';

            // set up camera controller
            headtrackr.controllers.three.realisticAbsoluteCameraControl(camera, 1, [0,0,0], new THREE.Vector3(0,0,0), {damping : 1.1});
            var htracker = new headtrackr.Tracker();
            htracker.init(videoInput, canvasInput);
            htracker.start();

//          var stats = new Stats();
//          stats.domElement.style.position = 'absolute';
//          stats.domElement.style.top = '0px';
//          document.body.appendChild( stats.domElement );


document.addEventListener('headtrackrStatus', 
  function (event) {
    if (event.status == "found") {
        addCube();

    }
  }
);      

}    
        // animation loop
        function animate() {

            // loop on request animation loop
            // - it has to be at the begining of the function
            // - see details at http://my.opera.com/emoller/blog/2011/12/20/requestanimationframe-for-smart-er-animating
            requestAnimationFrame( animate );

            // do the render
            render();

            // update stats
            //stats.update();
        }

function render() {

            // convert matrix of every frame of video -> texture
            uniforms.tex0 = materials;
            uniforms.coef = 0.2;  
            uniforms.imageSize.x = window.innerWidth;
            uniforms.imageSize.y = window.innerHeight;
            // update camera controls
//          cameraControls.update();
            if(  videoInput.readyState ===  videoInput.HAVE_ENOUGH_DATA ){
                videoTexture.needsUpdate = true;
            }

            // actually render the scene
            renderer.render( scene, camera );
        }
function addCube(){
        material = new THREE.ShaderMaterial({
          uniforms: uniforms,
          attributes: attributes,
          vertexShader: document.getElementById('vertexShader').textContent,
          fragmentShader: document.getElementById('fragmentShader').textContent,
          transparent: true
        });


            //The cube
        cube = new THREE.Mesh(new THREE.CubeGeometry(40, 30, 10, 1, 1, 1, material), new THREE.MeshFaceMaterial());
        cube.overdraw = true;
        scene.add(cube);
}
</script>
</body>
</html>

1 Answer


The main problem here is that you are using old GLSL reserved words that exist for programmable / fixed-function interop. In OpenGL ES 2.0, things like gl_MultiTexCoord0 and gl_TextureMatrix[n] are undefined, because ES completely removed the legacy fixed-function vertex array baggage that regular OpenGL has to deal with. Those reserved words give you access to the per-texture-unit matrix and vertex array state that fixed-function OpenGL maintains, and that is their whole purpose in OpenGL; since that state does not exist in OpenGL ES, neither do they.

To work around this, you have to use generic vertex attributes (e.g. attribute vec2 tex_st) instead of a 1:1 mapping between texture coordinate pointers and texture units. Likewise, there is no texture matrix associated with each texture unit; to replicate the texture matrix functionality, you need to pass a matrix uniform to your vertex/fragment shader, as sketched below.
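Here is a minimal sketch of what that rewrite might look like in GLSL ES 1.0. The names position, uv, projectionMatrix and modelViewMatrix happen to match what Three.js's ShaderMaterial declares for you automatically, so inside Three.js you would omit those declarations; uvTransform is a hypothetical uniform I made up to stand in for gl_TextureMatrix[0].

// GLSL ES 1.0 vertex shader: generic attributes and uniforms replace the
// removed fixed-function built-ins. (Declarations shown for clarity; a
// Three.js ShaderMaterial prepends most of them for you.)
attribute vec3 position;        // replaces gl_Vertex
attribute vec2 uv;              // replaces gl_MultiTexCoord0
uniform mat4 projectionMatrix;  // together with modelViewMatrix,
uniform mat4 modelViewMatrix;   //   replaces ftransform()
uniform mat3 uvTransform;       // hypothetical stand-in for gl_TextureMatrix[0]

varying vec2 texcoord0;

void main()
{
    // equivalent of gl_Position = ftransform();
    gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);

    // equivalent of vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0)
    texcoord0 = (uvTransform * vec3(uv, 1.0)).xy;
}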

Honestly, I can't remember the last time I actually found it useful to have a separate texture matrix / texture coordinate pointer for every texture unit when using shaders... I regularly have 4 or 5 different textures and may only need 1 or 2 sets of texture coordinates. It is not a big loss.

The kicker here is ftransform (...). That function exists in OpenGL so that a 1-line vertex shader can behave the same way as the fixed-function pipeline. You must have copied and pasted a shader that was written for OpenGL 2.x or 3.x (compatibility). Explaining how to fix everything in this shader would be a real chore, and you will probably need to learn more about GLSL before most of what I just wrote makes sense :-\
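For the luminance-displacement effect you describe, the displacement itself also has to happen in the vertex shader. Here is a rough sketch under the same naming assumptions as above, reusing the tex0 and coef uniforms and the lumcoeff constant from your fragment shader. Note that sampling a texture in a vertex shader (vertex texture fetch) is optional in WebGL 1, so check that gl.getParameter(gl.MAX_VERTEX_TEXTURE_IMAGE_UNITS) is greater than 0 before relying on it.

// Sketch: displace each vertex along +z by the video luminance at its uv.
// Assumes the device supports vertex texture fetch.
attribute vec3 position;
attribute vec2 uv;
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
uniform sampler2D tex0;  // the video texture
uniform float coef;      // displacement strength

varying vec2 texcoord0;

const vec4 lumcoeff = vec4(0.299, 0.587, 0.114, 0.0);

void main()
{
    texcoord0 = uv;

    // Bright pixels push the vertex forward; dark pixels leave it in place.
    float luma = dot(lumcoeff, texture2D(tex0, uv));
    vec3 displaced = position + vec3(0.0, 0.0, luma * coef);

    gl_Position = projectionMatrix * modelViewMatrix * vec4(displaced, 1.0);
}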

Answered 2013-09-07T06:48:05.923