
I took a course on shaders and I'm interested in computer vision and image processing. I'd like to know how to combine GLSL shader knowledge with image processing. What do I gain by implementing image processing algorithms in GLSL?


3 Answers


Case study: real-time box blur on the CPU vs. a GPU fragment shader

I implemented a simple box blur (https://en.wikipedia.org/wiki/Box_blur) both on the CPU and in a GPU fragment shader to see which is faster:

[Screenshot: the original webcam image on the left, the box-blurred output on the right.]

My camera's refresh rate caps the FPS at 30, so I measured how wide the blur box could be made while still holding 30 FPS.

On a Lenovo T430 (2012) with an NVIDIA NVS5400, running Ubuntu 16.04, at an image size of 960x540, the maximum widths were:

  • GPU: 23
  • CPU: 5

Since the computation is quadratic in the blur width, the speedup is:

( 23 / 5 ) ^ 2 = 21.16

The GPU is faster than the CPU!

Not all algorithms are faster on the GPU. A single-pass per-pixel operation like swapping the RGB channels already reaches 30 FPS on the CPU, so adding the complexity of GPU programming gains nothing there.

As with any other CPU vs. GPU acceleration question, it ultimately comes down to whether you have enough work per byte transferred to the GPU, and benchmarking is the best thing you can do. In general, algorithms that are quadratic or worse per pixel are good candidates for the GPU. See also: What do the terms "CPU bound" and "I/O bound" mean?

The main parts of the code (just clone from GitHub):

#include "common.h"
#include "../v4l2/common_v4l2.h"

static const GLuint WIDTH = 640;
static const GLuint HEIGHT = 480;
static const GLfloat vertices[] = {
/*  xy            uv */
    -1.0,  1.0,   0.0, 1.0,
     0.0,  1.0,   0.0, 0.0,
     0.0, -1.0,   1.0, 0.0,
    -1.0, -1.0,   1.0, 1.0,
};
static const GLuint indices[] = {
    0, 1, 2,
    0, 2, 3,
};

static const GLchar *vertex_shader_source =
    "#version 330 core\n"
    "in vec2 coord2d;\n"
    "in vec2 vertexUv;\n"
    "out vec2 fragmentUv;\n"
    "void main() {\n"
    "    gl_Position = vec4(coord2d, 0, 1);\n"
    "    fragmentUv = vertexUv;\n"
    "}\n";
static const GLchar *fragment_shader_source =
    "#version 330 core\n"
    "in vec2 fragmentUv;\n"
    "out vec3 color;\n"
    "uniform sampler2D myTextureSampler;\n"
    "void main() {\n"
    "    color = texture(myTextureSampler, fragmentUv.yx).rgb;\n"
    "}\n";

static const GLchar *vertex_shader_source2 =
    "#version 330 core\n"
    "in vec2 coord2d;\n"
    "in vec2 vertexUv;\n"
    "out vec2 fragmentUv;\n"
    "void main() {\n"
    "    gl_Position = vec4(coord2d + vec2(1.0, 0.0), 0, 1);\n"
    "    fragmentUv = vertexUv;\n"
    "}\n";
static const GLchar *fragment_shader_source2 =
    "#version 330 core\n"
    "in vec2 fragmentUv;\n"
    "out vec3 color;\n"
    "uniform sampler2D myTextureSampler;\n"
    "// pixel Delta. How large a pixel is in 0.0 to 1.0 that textures use.\n"
    "uniform vec2 pixD;\n"
    "void main() {\n"

    /*"// Identity\n"*/
    /*"    color = texture(myTextureSampler, fragmentUv.yx ).rgb;\n"*/

    /*"// Inverter\n"*/
    /*"    color = 1.0 - texture(myTextureSampler, fragmentUv.yx ).rgb;\n"*/

    /*"// Swapper\n"*/
    /*"    color = texture(myTextureSampler, fragmentUv.yx ).gbr;\n"*/

    /*"// Double vision ortho.\n"*/
    /*"    color = ("*/
    /*"        texture(myTextureSampler, fragmentUv.yx ).rgb +\n"*/
    /*"        texture(myTextureSampler, fragmentUv.xy ).rgb\n"*/
    /*"    ) / 2.0;\n"*/

    /*"// Multi-me.\n"*/
    /*"    color = texture(myTextureSampler, 4.0 * fragmentUv.yx ).rgb;\n"*/

    /*"// Horizontal linear blur.\n"*/
    /*"    int blur_width = 21;\n"*/
    /*"    int blur_width_half = blur_width / 2;\n"*/
    /*"    color = vec3(0.0, 0.0, 0.0);\n"*/
    /*"    for (int i = -blur_width_half; i <= blur_width_half; ++i) {\n"*/
    /*"       color += texture(myTextureSampler, vec2(fragmentUv.y + i * pixD.x, fragmentUv.x)).rgb;\n"*/
    /*"    }\n"*/
    /*"    color /= blur_width;\n"*/

    /*"// Square linear blur.\n"*/
    "    int blur_width = 23;\n"
    "    int blur_width_half = blur_width / 2;\n"
    "    color = vec3(0.0, 0.0, 0.0);\n"
    "    for (int i = -blur_width_half; i <= blur_width_half; ++i) {\n"
    "       for (int j = -blur_width_half; j <= blur_width_half; ++j) {\n"
    "           color += texture(\n"
    "               myTextureSampler, fragmentUv.yx + ivec2(i, j) * pixD\n"
    "           ).rgb;\n"
    "       }\n"
    "    }\n"
    "    color /= (blur_width * blur_width);\n"

    "}\n";

int main(int argc, char **argv) {
    CommonV4l2 common_v4l2;
    GLFWwindow *window;
    GLint
        coord2d_location,
        myTextureSampler_location,
        vertexUv_location,
        coord2d_location2,
        pixD_location2,
        myTextureSampler_location2,
        vertexUv_location2
    ;
    GLuint
        ebo,
        program,
        program2,
        texture,
        vbo,
        vao,
        vao2
    ;
    unsigned int
        cpu,
        width,
        height
    ;
    uint8_t *image;
    float *image2 = NULL;
    /*uint8_t *image2 = NULL;*/

    if (argc > 1) {
        width = strtol(argv[1], NULL, 10);
    } else {
        width = WIDTH;
    }
    if (argc > 2) {
        height = strtol(argv[2], NULL, 10);
    } else {
        height = HEIGHT;
    }
    if (argc > 3) {
        cpu = (argv[3][0] == '1');
    } else {
        cpu = 0;
    }

    /* Window system. */
    glfwInit();
    glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
    window = glfwCreateWindow(2 * width, height, __FILE__, NULL, NULL);
    glfwMakeContextCurrent(window);
    glewInit();
    CommonV4l2_init(&common_v4l2, COMMON_V4L2_DEVICE, width, height);

    /* Shader setup. */
    program = common_get_shader_program(vertex_shader_source, fragment_shader_source);
    coord2d_location = glGetAttribLocation(program, "coord2d");
    vertexUv_location = glGetAttribLocation(program, "vertexUv");
    myTextureSampler_location = glGetUniformLocation(program, "myTextureSampler");

    /* Shader setup 2. */
    const GLchar *fs;
    if (cpu) {
        fs = fragment_shader_source;
    } else {
        fs = fragment_shader_source2;
    }
    program2 = common_get_shader_program(vertex_shader_source2, fs);
    coord2d_location2 = glGetAttribLocation(program2, "coord2d");
    vertexUv_location2 = glGetAttribLocation(program2, "vertexUv");
    myTextureSampler_location2 = glGetUniformLocation(program2, "myTextureSampler");
    pixD_location2 = glGetUniformLocation(program2, "pixD");

    /* Create vbo. */
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    /* Create ebo. */
    glGenBuffers(1, &ebo);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

    /* vao. */
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glVertexAttribPointer(coord2d_location, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(vertices[0]), (GLvoid*)0);
    glEnableVertexAttribArray(coord2d_location);
    glVertexAttribPointer(vertexUv_location, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), (GLvoid*)(2 * sizeof(vertices[0])));
    glEnableVertexAttribArray(vertexUv_location);
    glBindVertexArray(0);

    /* vao2. */
    glGenVertexArrays(1, &vao2);
    glBindVertexArray(vao2);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glVertexAttribPointer(coord2d_location2, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(vertices[0]), (GLvoid*)0);
    glEnableVertexAttribArray(coord2d_location2);
    glVertexAttribPointer(vertexUv_location2, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), (GLvoid*)(2 * sizeof(vertices[0])));
    glEnableVertexAttribArray(vertexUv_location2);
    glBindVertexArray(0);

    /* Texture buffer. */
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

    /* Constant state. */
    glViewport(0, 0, 2 * width, height);
    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
    glActiveTexture(GL_TEXTURE0);

    /* Main loop. */
    common_fps_init();
    do {
        /* Blocks until an image is available, thus capping FPS to that.
         * 30FPS is common in cheap webcams. */
        CommonV4l2_updateImage(&common_v4l2);
        image = CommonV4l2_getImage(&common_v4l2);
        glClear(GL_COLOR_BUFFER_BIT);

        /* Original. */
        glTexImage2D(
            GL_TEXTURE_2D, 0, GL_RGB, width, height,
            0, GL_RGB, GL_UNSIGNED_BYTE, image
        );
        glUseProgram(program);
        glUniform1i(myTextureSampler_location, 0);
        glBindVertexArray(vao);
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
        glBindVertexArray(0);

        /* Optional CPU modification to compare with GPU shader speed.  */
        if (cpu) {
            image2 = realloc(image2, 3 * width * height * sizeof(image2[0]));
            for (unsigned int i = 0; i < height; ++i) {
                for (unsigned int j = 0; j < width; ++j) {
                    size_t index = 3 * (i * width + j);

                    /* Inverter. */
                    /*image2[index + 0] = 1.0 - (image[index + 0] / 255.0);*/
                    /*image2[index + 1] = 1.0 - (image[index + 1] / 255.0);*/
                    /*image2[index + 2] = 1.0 - (image[index + 2] / 255.0);*/

                    /* Swapper. */
                    /*image2[index + 0] = image[index + 1] / 255.0;*/
                    /*image2[index + 1] = image[index + 2] / 255.0;*/
                    /*image2[index + 2] = image[index + 0] / 255.0;*/

                    /* Square linear blur. */
                    int blur_width = 5;
                    int blur_width_half = blur_width / 2;
                    int blur_width2 = (blur_width * blur_width);
                    image2[index + 0] = 0.0;
                    image2[index + 1] = 0.0;
                    image2[index + 2] = 0.0;
                    for (int k = -blur_width_half; k <= blur_width_half; ++k) {
                        for (int l = -blur_width_half; l <= blur_width_half; ++l) {
                            int i2 = i + k;
                            int j2 = j + l;
                            // Out of bounds is treated as black. TODO: use modulo wrapping to match the shader's texture sampling exactly.
                            if (i2 >= 0 && i2 < (int)height && j2 >= 0 && j2 < (int)width) {
                                unsigned int srcIndex = index + 3 * (k * width + l);
                                image2[index + 0] += image[srcIndex + 0];
                                image2[index + 1] += image[srcIndex + 1];
                                image2[index + 2] += image[srcIndex + 2];
                            }
                        }
                    }
                    image2[index + 0] /= (blur_width2 * 255.0);
                    image2[index + 1] /= (blur_width2 * 255.0);
                    image2[index + 2] /= (blur_width2 * 255.0);
                }
            }
            glTexImage2D(
                GL_TEXTURE_2D, 0, GL_RGB, width, height,
                0, GL_RGB, GL_FLOAT, image2
            );
        }

        /* Modified. */
        glUseProgram(program2);
        glUniform1i(myTextureSampler_location2, 0);
        glUniform2f(pixD_location2, 1.0 / width, 1.0 / height);
        glBindVertexArray(vao2);
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
        glBindVertexArray(0);

        glfwSwapBuffers(window);
        glfwPollEvents();
        common_fps_print();
    } while (!glfwWindowShouldClose(window));

    /* Cleanup. */
    if (cpu) {
        free(image2);
    }
    CommonV4l2_deinit(&common_v4l2);
    glDeleteBuffers(1, &vbo);
    glDeleteVertexArrays(1, &vao);
    glDeleteTextures(1, &texture);
    glDeleteProgram(program);
    glfwTerminate();
    return EXIT_SUCCESS;
}
Answered 2016-11-16T19:47:59.540

The first obvious answer is that you gain parallelism. Now, why use GLSL rather than, say, the more flexible CUDA? GLSL doesn't require an NVIDIA graphics card, so it's a more portable solution (though you could also go with OpenCL).

What do you gain from parallelism? Most of the time, pixels can be processed independently. For example, increasing an image's contrast usually means looping over every pixel and applying an affine transformation to its value. If each pixel is handled by its own thread, that loop disappears: you just rasterize a quad and apply a fragment shader that reads the texture at the current fragment and writes the transformed pixel value to the render target (or the screen), as in the sketch below.
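
A minimal GLSL sketch of that idea (the fragmentUv / myTextureSampler names follow the listing in the answer above; the contrast and brightness uniforms are illustrative and not part of the original post):

#version 330 core
in vec2 fragmentUv;
out vec3 color;
uniform sampler2D myTextureSampler;
// Illustrative parameters: contrast = 1.0 and brightness = 0.0 leave the image unchanged.
uniform float contrast;
uniform float brightness;
void main() {
    vec3 c = texture(myTextureSampler, fragmentUv).rgb;
    // Per-pixel affine transform: scale around mid-gray, then shift.
    color = clamp((c - 0.5) * contrast + 0.5 + brightness, 0.0, 1.0);
}

The CPU side then only sets the two uniforms and draws a textured quad once per frame; every fragment is processed in parallel.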

The drawback is that your data needs to live on the GPU: you have to transfer the whole image to the GPU, which can take time and can wipe out the speedup gained from parallelization. So GPU implementations usually make sense when the operation is compute-intensive, or when the entire pipeline can stay on the GPU (for example, if the goal is only to display the modified image on screen, there is no need to transfer it back to the CPU).

Answered 2012-12-04T01:25:46.910

OpenGL 4.3 (announced at SIGGRAPH 2012) supports compute shaders. If you are doing strictly graphics work and are already using OpenGL, this may be easier to use than OpenCL/OpenGL interop (or CUDA/OpenGL interop).

Here is Khronos's take on when to use 4.3 compute shaders versus OpenCL: link to PDF; see slide 5.
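
For a rough idea of what a compute shader looks like, here is a minimal sketch (illustrative only, not taken from the slides or the listing above; the rgba8 format, binding points, and 16x16 work-group size are arbitrary choices) that inverts an image:

#version 430 core
layout(local_size_x = 16, local_size_y = 16) in;
layout(rgba8, binding = 0) uniform readonly image2D srcImage;
layout(rgba8, binding = 1) uniform writeonly image2D dstImage;
void main() {
    ivec2 p = ivec2(gl_GlobalInvocationID.xy);
    ivec2 size = imageSize(srcImage);
    // Skip the padding invocations of the last work groups.
    if (p.x >= size.x || p.y >= size.y) {
        return;
    }
    vec4 c = imageLoad(srcImage, p);
    // Example operation: invert the RGB channels, keep alpha.
    imageStore(dstImage, p, vec4(1.0 - c.rgb, c.a));
}

It would be dispatched with glDispatchCompute((width + 15) / 16, (height + 15) / 16, 1), followed by glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT) before the output image is sampled or displayed.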

Answered 2012-12-09T19:54:29.613