glGenTextures有效,但每次都返回相同的纹理名称
glGenTextures works but returns the same texture name every single time
我有一个名为Texture
的类。这个类负责管理纹理。在程序启动时,OpenGL上下文被正确初始化(这就是为什么这个问题与大多数涉及意外glGenTextures行为的问题不同)。Texture
从glGenTextures()
获得纹理名称,然后在函数texGLInit()
中将图像数据加载并绑定到该纹理名称的存储器中。这是有效的,并且纹理显示完全符合预期。
但是,我也希望当用户单击按钮并通过OpenFileDialog从硬盘中选择一个文件时,能够更改显示的纹理。此函数称为reloadTexture()
。此功能尝试从内存中删除旧的图像/像素数据,并用用户选择的文件中的新数据替换。然而,当这种情况发生时,它会使用glDeleteTextures
删除纹理名称,然后分配一个新的纹理名称,并使用函数texGLInit()
将新的像素数据加载到内存中。但纹理名称在100%的时间内与之前的名称完全相同(通常为"1")。
发生这种情况后显示的图像很奇怪。它有新的图像尺寸,但仍然有旧的图像像素。简单地说,它将旧图像缩小到新图像的大小。它仍在使用据称已删除的像素数据进行绘制。应该发生的是,屏幕现在在屏幕上显示新的图像文件。我相信这与纹理名称不唯一有关。
代码包含在下面:
Texture::Texture(string filename) //---Constructor: loads the initial image from disk and uploads it as an OpenGL texture.
{
    textureID[0] = 0;
    const char* fnPtr = filename.c_str(); // our image loader accepts a ptr to a char, not a string

    lodepng::load_file(buffer, fnPtr);                     // read the raw PNG file bytes into 'buffer'
    unsigned error = lodepng::decode(image, w, h, buffer); // decode: fills 'image' with RGBA pixel data and sets w/h

    // Report any decode errors.
    // BUGFIX: the message previously began with a literal "n" — the intended "\n" escape was lost.
    if (error)
    {
        cout << "\ndecoder error " << error << ": " << lodepng_error_text(error) << endl;
    }

    // These throw exceptions if the image dimensions are not powers of two.
    checkPOT(w);
    checkPOT(h);

    // 'image' now contains our pixel data — upload it into video memory.
    texGLInit();
    Draw_From_Corner = CENTER;
}
void Texture::reloadTexture(string filename) // Replaces the texture name and the image/pixel data bound to this texture.
{
    // First and foremost clear the image and buffer vectors back down to nothing
    // so we can start afresh with the new file's data.
    buffer.clear();
    image.clear();
    w = 0;
    h = 0;

    // Also delete the texture name we were using before. (Note: the freed name may
    // legitimately be re-issued by the next glGenTextures call — that is not a bug.)
    glDeleteTextures(1, &textureID[0]);

    const char* fnPtr = filename.c_str(); // our image loader accepts a ptr to a char, not a string

    lodepng::load_file(buffer, fnPtr);                     // read the raw PNG file bytes into 'buffer'
    unsigned error = lodepng::decode(image, w, h, buffer); // decode: fills 'image' with RGBA pixel data and sets w/h

    // Report any decode errors.
    // BUGFIX: the message previously began with a literal "n" — the intended "\n" escape was lost.
    if (error)
    {
        cout << "\ndecoder error " << error << ": " << lodepng_error_text(error) << endl;
    }

    // These throw exceptions if the image dimensions are not powers of two.
    checkPOT(w);
    checkPOT(h);

    // 'image' now contains the new pixel data — upload it into video memory.
    texGLInit();
    Draw_From_Corner = CENTER;
}
void Texture::texGLInit()//Actually gets the new texture name loads the pixeldata into openGL
{
glGenTextures(1, &textureID[0]);
////printf("ntextureID = %u", textureID[0]);
glBindTexture(GL_TEXTURE_2D, textureID[0]);//evrything we're about to do is about this texture
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
//glDisable(GL_COLOR_MATERIAL);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8,w,h,0, GL_RGBA, GL_UNSIGNED_BYTE, &image[0]);
//we COULD free the image vectors memory right about now. But we'll do it when there's a need to. At the beginning of the reloadtexture func, makes sure it happens when we need it to.
}
这里值得一提的是Texture类中的draw函数。
void Texture::draw(point* origin, ANCHOR drawFrom) // Draws this texture on a quad anchored at 'origin' per 'drawFrom'.
{
    // Remember which corner we were asked to anchor from.
    Draw_From_Corner = drawFrom;

    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, textureID[0]); // bind the texture we're about to draw

    // Quick vertex array for the quad we bind the texture to: four (x, y) pairs in the
    // order bottom-left, top-left, top-right, bottom-right.
    GLfloat vArray[8];
#pragma region anchor switch
    switch (Draw_From_Corner)
    {
    case CENTER:
        vArray[0] = origin->x-(w/2); vArray[1] = origin->y-(h/2); // bottom left  i0
        vArray[2] = origin->x-(w/2); vArray[3] = origin->y+(h/2); // top left     i1
        vArray[4] = origin->x+(w/2); vArray[5] = origin->y+(h/2); // top right    i2
        vArray[6] = origin->x+(w/2); vArray[7] = origin->y-(h/2); // bottom right i3
        break;
    case BOTTOMLEFT:
        vArray[0] = origin->x;   vArray[1] = origin->y;   // bottom left  i0
        vArray[2] = origin->x;   vArray[3] = origin->y+h; // top left     i1
        vArray[4] = origin->x+w; vArray[5] = origin->y+h; // top right    i2
        vArray[6] = origin->x+w; vArray[7] = origin->y;   // bottom right i3
        break;
    case TOPLEFT:
        vArray[0] = origin->x;   vArray[1] = origin->y-h; // bottom left  i0
        vArray[2] = origin->x;   vArray[3] = origin->y;   // top left     i1
        vArray[4] = origin->x+w; vArray[5] = origin->y;   // top right    i2
        vArray[6] = origin->x+w; vArray[7] = origin->y-h; // bottom right i3
        break;
    case TOPRIGHT:
        vArray[0] = origin->x-w; vArray[1] = origin->y-h; // bottom left  i0
        vArray[2] = origin->x-w; vArray[3] = origin->y;   // top left     i1
        vArray[4] = origin->x;   vArray[5] = origin->y;   // top right    i2
        vArray[6] = origin->x;   vArray[7] = origin->y-h; // bottom right i3
        break;
    case BOTTOMRIGHT:
        // BUGFIX: the top-right vertex previously used (x-h, y), producing a degenerate
        // quad. With the anchor at the bottom-right corner the quad spans x-w..x, y..y+h.
        vArray[0] = origin->x-w; vArray[1] = origin->y;   // bottom left  i0
        vArray[2] = origin->x-w; vArray[3] = origin->y+h; // top left     i1
        vArray[4] = origin->x;   vArray[5] = origin->y+h; // top right    i2
        vArray[6] = origin->x;   vArray[7] = origin->y;   // bottom right i3
        break;
    default: // same as CENTER
        vArray[0] = origin->x-(w/2); vArray[1] = origin->y-(h/2); // bottom left  i0
        vArray[2] = origin->x-(w/2); vArray[3] = origin->y+(h/2); // top left     i1
        vArray[4] = origin->x+(w/2); vArray[5] = origin->y+(h/2); // top right    i2
        vArray[6] = origin->x+(w/2); vArray[7] = origin->y-(h/2); // bottom right i3
        break;
    }
#pragma endregion

    // Texture-coordinate array (could live on the heap instead of being rebuilt each
    // frame). NOTE: the v-coordinates are flipped because LodePNG delivers the pixel
    // data top-to-bottom while OpenGL expects bottom-to-top.
    GLfloat tArray[8] =
    {
        0.0f, 1.0f, // 0
        0.0f, 0.0f, // 1
        1.0f, 0.0f, // 2
        1.0f, 1.0f  // 3
    };

    // Index array: two clockwise triangles covering the quad.
    GLubyte iArray[6] =
    {
        0, 1, 2,
        0, 2, 3
    };

    // Activate arrays and hand OpenGL pointers to our vertex/texcoord data.
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glVertexPointer(2, GL_FLOAT, 0, &vArray[0]);
    glTexCoordPointer(2, GL_FLOAT, 0, &tArray[0]);

    // Draw it all.
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, &iArray[0]);

    // Disable the client states again and stop texturing.
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisable(GL_TEXTURE_2D);
}
有人能告诉我为什么OpenGL不会从我加载到它的新像素数据中加载和绘制吗?正如我所说,我怀疑这与glGenTextures没有给我一个新的纹理名称有关。
您正在调用glTexImage2D
并传递一个指向客户端内存的指针。要小心,文档上写着:
如果在指定纹理图像的同时将非零命名缓冲区对象绑定到
GL_PIXEL_UNPACK_BUFFER
目标(请参见glBindBuffer
),则data
将被视为缓冲区对象的数据存储中的字节偏移。
为了安全起见,您可能希望调用glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0)
来解除绑定任何缓冲区对象。
- 来自 std::list 的迭代器 .end() 按预期返回"0xcdcdcdcdcdcdcdcd"但 .begin()
- 什么时候在C++中返回常量引用是个好主意
- 你能重载对象变量名本身返回的内容吗
- 为什么 Serial.println(<char[]>);返回随机字符?
- C++映射:具有自定义类的运算符[]不起作用(总是返回0)
- 如何获取std::result_of函数的返回类型
- QueryWorkingSet总是返回false
- (C++)分析树以计算返回错误值的简单算术表达式
- 访问者访问变体并返回不同类型时出错
- 如何返回一个类的两个对象相加的结果
- OpenInventor从9.8升级到10.4.2后,GLSL纹理返回零
- OpenGL GLSL 纹理函数始终返回 vec4(1,1,1,1),白色三角形
- C++无法返回纹理向量
- Assimp 不返回纹理数据
- DXGI_FORMAT_YUY2纹理在 Windows 8.1 和 Windows 10 下返回不同的 RowPitch
- GLSL纹理功能仅返回黑色的天空盒
- CUDA 纹理无法返回无符号长长长的值类型
- C++返回 sf::纹理时遇到问题
- stb图像加载库返回白色纹理
- glGenTextures有效,但每次都返回相同的纹理名称