I want to implement color picking/selection with these three methods:
void ColorIndex(uint colorIndex)
{
    // Reinterpret the 32-bit index as 4 bytes: R, G, B, A
    glColor4ubv((GLubyte *)&colorIndex);
}

void ColorIndices(vector<uint> &colorIndices)
{
    // Each 32-bit index supplies one RGBA color
    GLubyte *colorPtr = (GLubyte *)&colorIndices[0];
    glColorPointer(4, GL_UNSIGNED_BYTE, 0, colorPtr);
}

void ReadSelectedIndices(int x, int y, int width, int height, uint *selectedIndices)
{
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, selectedIndices);
}
They basically pack an unsigned integer (32 bits) into four bytes, R, G, B and A. Compared to GL_SELECT, this is simple and very fast.
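To make the byte layout explicit, the same packing can be written with shifts. This is only an illustrative sketch (EncodeIndexRGBA and DecodeIndexRGBA are hypothetical helpers, not part of my code); on a little-endian machine it produces the same bytes as the pointer casts above:

void EncodeIndexRGBA(uint index, GLubyte out[4])
{
    // Hypothetical helper: same layout the cast produces on little-endian
    out[0] = (GLubyte)(index & 0xFF);          // R = lowest byte
    out[1] = (GLubyte)((index >> 8) & 0xFF);   // G
    out[2] = (GLubyte)((index >> 16) & 0xFF);  // B
    out[3] = (GLubyte)((index >> 24) & 0xFF);  // A = highest byte
}

uint DecodeIndexRGBA(const GLubyte *in)
{
    return (uint)in[0] | ((uint)in[1] << 8) | ((uint)in[2] << 16) | ((uint)in[3] << 24);
}

For example, index 0x04030201 is drawn as R=0x01, G=0x02, B=0x03, A=0x04.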
The problem is that it sometimes doesn't work. It fails on Windows with an NVIDIA 9800 card, but works with an ATI Radeon 4670 on both Mac and Windows.
I thought the problem was in the alpha channel, so I tried this implementation:
union RgbColor
{
    GLuint colorIndex : 24;  // low 24 bits of the index
    GLubyte components[3];   // the same 3 bytes viewed as R, G, B (assumes little-endian)
    struct
    {
        GLubyte r;
        GLubyte g;
        GLubyte b;
    };
};
void ColorIndex(uint colorIndex)
{
    RgbColor color;
    color.colorIndex = colorIndex;
    glColor3ubv(color.components);
}
vector<GLubyte> colorComponents;  // global so the byte array outlives the call

void ColorIndices(vector<uint> &colorIndices)
{
    colorComponents.clear();
    for (uint i = 0; i < colorIndices.size(); i++)
    {
        RgbColor color;
        color.colorIndex = colorIndices[i];
        colorComponents.push_back(color.components[0]);
        colorComponents.push_back(color.components[1]);
        colorComponents.push_back(color.components[2]);
    }
    GLubyte *colorPtr = (GLubyte *)&colorComponents[0];
    glColorPointer(3, GL_UNSIGNED_BYTE, 0, colorPtr);
}
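For completeness, glColorPointer only records the pointer; the color array also has to be enabled and the data must stay alive until the draw call, which is why colorComponents is a global. A rough sketch of the draw side (vertices, indices and vertexCount are placeholders):

glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vertices);  // placeholder vertex data
ColorIndices(indices);                      // sets the color pointer
glDrawArrays(GL_POINTS, 0, vertexCount);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);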
const uint kMaxSelectedIndicesCount = 2000 * 2000; // max width * max height resolution
GLubyte colorBuffer[kMaxSelectedIndicesCount * 3];

void ReadSelectedIndices(int x, int y, int width, int height, uint *selectedIndices)
{
    uint count = (uint)width * (uint)height;
    memset(colorBuffer, 0, count * 3);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glReadPixels(x, y, width, height, GL_RGB, GL_UNSIGNED_BYTE, colorBuffer);
    GetGLError();
    // Repack each RGB triple into a 24-bit index
    for (uint i = 0; i < count; i++)
    {
        RgbColor color;
        color.components[0] = colorBuffer[i * 3 + 0];
        color.components[1] = colorBuffer[i * 3 + 1];
        color.components[2] = colorBuffer[i * 3 + 2];
        selectedIndices[i] = color.colorIndex;
    }
}
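For context, the picking pass that ties these functions together looks roughly like this (a simplified sketch; DrawSceneWithColorIndices is a placeholder for the actual draw code). State that can modify the exact color values written, such as dithering, lighting, blending and texturing, should be off during the pass:

void PickRectangle(int x, int y, int width, int height, uint *selectedIndices)
{
    glDisable(GL_DITHER);    // dithering is on by default and can alter colors
    glDisable(GL_LIGHTING);
    glDisable(GL_BLEND);
    glDisable(GL_TEXTURE_2D);

    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);  // white marks "no object"
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    DrawSceneWithColorIndices();            // placeholder
    ReadSelectedIndices(x, y, width, height, selectedIndices);
}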
Now picking works on both graphics cards and operating systems, but sometimes the wrong vertices are selected (ones outside the selection rectangle).
How can this happen? Is there a better way to implement color picking that is robust across different graphics cards and always correct?