OpenGL 3+中的球体纹理

Texturing a Sphere in OpenGL 3+

本文关键字:纹理 OpenGL      更新时间:2023-10-16

我想将一个1024*512的图像包裹在一个球体上。

这是我的代码:(并非全部显示)

在main.cpp:中

// One interleaved vertex as stored in the VBO: position, color and texture UV.
// Member names are load-bearing: setBuffer() addresses them via offsetof().
struct Vertex {
    GLdouble position[3]; // object-space position; NOTE(review): GL_DOUBLE is unusual for attributes — consider GLfloat
    GLfloat color[3];     // RGB color, one float per channel
    GLfloat textureUV[2]; // texture coordinate (U, V) in [0,1]
};
// Create the vertex buffer objects used by this demo.
// (Name typo "Gemetry" kept: it is the function's public identifier.)
void SetupGemetry()
{
    // Ask GL for four buffer names; vbo is the module-level handle array.
    glGenBuffers(4, vbo);
}
//Setting the current Buffer.
// Upload `no_vertices` interleaved vertices into vbo[buffer_no] and describe
// the three vertex attributes (0 = position, 1 = color, 2 = texture UV).
// @param buffer_no      index into the module-level vbo[] array
// @param no_vertices    number of Vertex elements in vertices_array
// @param vertices_array interleaved vertex data to upload (GL_STATIC_DRAW)
void setBuffer(int buffer_no, int no_vertices, Vertex *vertices_array)
{
    glBindBuffer(GL_ARRAY_BUFFER, vbo[buffer_no]);
    glBufferData(GL_ARRAY_BUFFER, no_vertices * sizeof(struct Vertex),
                 vertices_array, GL_STATIC_DRAW);

    // Attribute 0: position, 3 components.
    // NOTE(review): GL_DOUBLE attribute data is converted to float by the
    // pipeline here; storing GLfloat in Vertex would be simpler and smaller.
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_DOUBLE, GL_FALSE, sizeof(struct Vertex),
                          (const GLvoid*) offsetof(struct Vertex, position));

    // Attribute 1: color, 3 floats.
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(struct Vertex),
                          (const GLvoid*) offsetof(struct Vertex, color));

    // Attribute 2: texture coordinate.
    // FIX: the struct member is named `textureUV` (offsetof(..., texture) does
    // not compile), and it has 2 components, not 3 — the vertex shader
    // declares `layout(location = 2) in vec2 vertexUV;`.
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(struct Vertex),
                          (const GLvoid*) offsetof(struct Vertex, textureUV));
}

现在在我的Sphere.cpp中(在这里我加载*.bmp文件)

// Load an uncompressed 24-bit BMP file and upload it as a GL_TEXTURE_2D.
// @param imagepath path to the .bmp file on disk
// @return the GL texture name, or 0 on any failure
// FIXES vs. original: the function never returned textureID (UB for a
// non-void function); one error path returned `false` for a GLuint; the file
// was leaked on early exits; `data` (new[]) was never delete[]d; the printf
// strings had mangled newlines ("openedn" -> "opened\n").
GLuint Sphere::loadBMP_custom(const char * imagepath)
{
    // Data read from the header of the BMP file
    unsigned char header[54]; // Each BMP file begins by a 54-bytes header
    unsigned int dataPos;     // Position in the file where the actual data begins
    unsigned int width, height;
    unsigned int imageSize;   // = width*height*3
    // Actual RGB data
    unsigned char * data;

    // Open the file
    FILE * file = fopen(imagepath, "rb");
    if (!file)
    {
        printf("Image could not be opened\n");
        return 0;
    }
    // Validate the 54-byte header and the "BM" magic number.
    if (fread(header, 1, 54, file) != 54)
    {
        printf("Not a correct BMP file\n");
        fclose(file); // FIX: do not leak the FILE* on early exit
        return 0;     // FIX: was `return false` for a GLuint return type
    }
    if (header[0] != 'B' || header[1] != 'M')
    {
        printf("Not a correct BMP file\n");
        fclose(file);
        return 0;
    }
    // Pull the fields we need out of the header (all little-endian ints).
    dataPos    = *(int*)&(header[0x0A]);
    imageSize  = *(int*)&(header[0x22]);
    width      = *(int*)&(header[0x12]);
    height     = *(int*)&(header[0x16]);
    // Some BMP files are misformatted, guess missing information
    if (imageSize == 0) imageSize = width * height * 3; // 3 : one byte for each Red, Green and Blue component
    if (dataPos == 0)   dataPos = 54; // The BMP header is done that way

    // Create a buffer and read the pixel data.
    data = new unsigned char[imageSize];
    fread(data, 1, imageSize, file);
    // Everything is in memory now, the file can be closed
    fclose(file);

    // OpenGL part: create, bind and fill one 2D texture.
    GLuint textureID;
    glGenTextures(1, &textureID);
    // "Bind" the newly created texture : all future texture functions will modify this texture
    glBindTexture(GL_TEXTURE_2D, textureID);
    // BMP stores pixels as BGR, hence GL_BGR as the source format.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

    delete[] data;    // FIX: GL has its own copy now; free the CPU-side buffer
    return textureID; // FIX: original fell off the end without returning
}
// Load, compile and link the sphere's vertex + fragment shaders, then make
// the resulting program current. Sources and handles are stored in members.
void Sphere::SetupShaders_sphere(void){
    // Read our shaders into the appropriate buffers
    sphere_vertexsource = filetobuf_sphere("vertex_shader_2.vert");
    sphere_fragmentsource = filetobuf_sphere("fragment_shader_2.frag");
    // Assign our handles a "name" to new shader objects
    sphere_vertexshader = glCreateShader(GL_VERTEX_SHADER);
    sphere_fragmentshader = glCreateShader(GL_FRAGMENT_SHADER);
    // Associate the source code buffers with each handle
    glShaderSource(sphere_vertexshader, 1, (const GLchar**)&sphere_vertexsource, 0);
    glShaderSource(sphere_fragmentshader, 1, (const GLchar**)&sphere_fragmentsource, 0);
    // Setting them up by compiling, attaching and linking them!
    // NOTE(review): no compile/link status checks here — add glGetShaderiv /
    // glGetProgramiv logging when debugging shader failures.
    glCompileShader(sphere_vertexshader);
    glCompileShader(sphere_fragmentshader);
    sphere_shaderprogram = glCreateProgram();
    glAttachShader(sphere_shaderprogram, sphere_vertexshader);
    glAttachShader(sphere_shaderprogram, sphere_fragmentshader);
    // These bindings are overridden by the shader's layout(location = N)
    // qualifiers, but are kept consistent with setBuffer()'s three attributes.
    // (The shader actually names attribute 1 "inColor", not "in_Color" — the
    // layout qualifier is what makes this work regardless.)
    glBindAttribLocation(sphere_shaderprogram, 0, "in_Position");
    glBindAttribLocation(sphere_shaderprogram, 1, "in_Color");
    // FIX: attribute 2 (the texture UV) was never bound, unlike 0 and 1.
    glBindAttribLocation(sphere_shaderprogram, 2, "vertexUV");
    glLinkProgram(sphere_shaderprogram);
    glUseProgram(sphere_shaderprogram);
}

现在是我的片段着色器和顶点着色器：

顶点:

#version 330 core
// Vertex shader: transforms positions into clip space and forwards the
// texture coordinate to the fragment stage.
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 in_Position;
layout(location = 1) in vec3 inColor;   // NOTE(review): declared but unused below
layout(location = 2) in vec2 vertexUV;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
// Values that stay constant for the whole mesh.
uniform mat4 MVP_matrix;
void main(){
    // Output position of the vertex, in clip space : MVP * position
    gl_Position =  MVP_matrix * vec4(in_Position,1);
    // UV of the vertex. No special space for this one.
    UV = vertexUV;
}

片段:

#version 330 core
// Fragment shader: samples the bound 2D texture at the interpolated UV.
// Interpolated values from the vertex shaders
in vec2 UV;
// Output data
out vec3 color;
// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;
void main(){
    // Output color = color of the texture at the specified UV.
    // FIX: texture2D() was removed from the GLSL 3.30 core profile;
    // the overloaded texture() must be used instead.
    color = texture( myTextureSampler, UV ).rgb;
}

如何将图像文件坐标（UV）转换为3D坐标？如何将它们传递给顶点着色器？（我知道如何将数据传递给顶点着色器，我的意思是如何将2D图像文件映射到3D？）例如，我找到了这个不错的教程

www.opengl-tutorial.org/beginners-tutorials/tutorial-5-a-textured-cube/

也就是说："好吧，我们在这里必须做完全相同的事情，但我们将给出一个（U,V）对的缓冲区，而不是（R,G,B）三元组的缓冲区。"

// Two UV coordinates for each vertex. They were created with Blender. You'll learn shortly how to do this yourself.
// (Quoted from the opengl-tutorial.org textured-cube tutorial: one (U, V)
// pair per vertex, with V flipped as 1.0f - v because BMP/Blender and OpenGL
// disagree on the vertical origin of texture space.)
static const GLfloat g_uv_buffer_data[] = {
    0.000059f, 1.0f-0.000004f,
    0.000103f, 1.0f-0.336048f,
    0.335973f, 1.0f-0.335903f,
    1.000023f, 1.0f-0.000013f,
    0.667979f, 1.0f-0.335851f,
    0.999958f, 1.0f-0.336064f,
    0.667979f, 1.0f-0.335851f,
    0.336024f, 1.0f-0.671877f,
    0.667969f, 1.0f-0.671889f,
    1.000023f, 1.0f-0.000013f,
    0.668104f, 1.0f-0.000013f,
    0.667979f, 1.0f-0.335851f,
    0.000059f, 1.0f-0.000004f,
    0.335973f, 1.0f-0.335903f,
    0.336098f, 1.0f-0.000071f,
    0.667979f, 1.0f-0.335851f,
    0.335973f, 1.0f-0.335903f,
    0.336024f, 1.0f-0.671877f,
    1.000004f, 1.0f-0.671847f,
    0.999958f, 1.0f-0.336064f,
    0.667979f, 1.0f-0.335851f,
    0.668104f, 1.0f-0.000013f,
    0.335973f, 1.0f-0.335903f,
    0.667979f, 1.0f-0.335851f,
    0.335973f, 1.0f-0.335903f,
    0.668104f, 1.0f-0.000013f,
    0.336098f, 1.0f-0.000071f,
    0.000103f, 1.0f-0.336048f,
    0.000004f, 1.0f-0.671870f,
    0.336024f, 1.0f-0.671877f,
    0.000103f, 1.0f-0.336048f,
    0.336024f, 1.0f-0.671877f,
    0.335973f, 1.0f-0.335903f,
    0.667969f, 1.0f-0.671889f,
    1.000004f, 1.0f-0.671847f,
    0.667979f, 1.0f-0.335851f
};

我必须用Blender吗？我发现了这个http://www.opengl.org/wiki/Texturing_a_Sphere#2D_Texture_Mapping_a_Sphere但我仍然无法理解

如何将图像文件坐标(UV)转换为3D坐标?

你不需要转换。纹理不构成几何体，您还需要自己提供球体的坐标。不，您不必使用Blender，它只是用于导出一些顶点数据（位置、法线和纹理坐标）供以后使用。因此，除了UV缓冲区外，还应该有一个位置缓冲区和一个法线缓冲区。

"如何将图像文件坐标（UV）转换为3D坐标？"这个问题问反了。您应该问"如何将球体坐标（3D）转换为图像坐标（UV）？"教程示例使用Blender创建UV数据的静态数组。如果要在程序中计算球体的顶点，则需要同时计算UV坐标，并将其存储在"顶点"结构中。或者，如果您有一组预先计算的静态球体坐标，则需要计算一组对应的UV坐标。其思路是获取3D数据并将其传入一个映射函数，该函数返回该3D点所对应纹理像素的U、V坐标。

最简单的计算方法是使用极坐标（方位角、仰角）并将其映射到U、V范围。如果正在计算球体的3D顶点，则算法可能已经使用极坐标并调用三角函数来获得X、Y、Z。

opengl.org链接提到"立方体贴图"是对球体进行纹理处理的首选方法。"为了获得最佳效果,请使用立方体贴图。应用2D纹理的问题是,当您将2D纹理包裹到球体(球体的顶部和底部区域)上时,纹理看起来受到挤压。"

当包裹在球体或其他三维曲线周围时,您建议的二维贴图将始终具有扭曲。可以调整U、V映射算法以最小化效果或将接缝隐藏在主视线之外,但如果旋转球体,它们将显示在某个位置。