OpenGL C++ mouse ray picking glm::unProject

OpenGL C++ mouse ray picking glm::unProject

本文关键字:glm unproject picking ray C++ mouse OpenGL      更新时间:2023-10-16

我目前正在开发一个C++游戏引擎,我想在应用程序中构建鼠标交互。我以前通过光线拾取来做到这一点,但当时我使用固定的鼠标位置,现在我想不这样做。我读到您可以使用 glm::unProject 函数来执行此操作,但我的就是不起作用。此函数给出的坐标不正确。我做错了什么?

// Build the camera matrices for this frame.
rscore_projection_matrix = glm::perspective(45.0f, (float)(windowWidth)/(float)(windowHeight), 0.1f, 1000.0f);
rscore_view_matrix = glm::lookAt(glm::vec3(lengthdir_x(16, rscam_direction)+rscam_x, rscam_z, lengthdir_y(16, rscam_direction)+rscam_y), glm::vec3(rscam_x, 0, rscam_y), glm::vec3(0,1,0));
rscore_model_matrix = glm::mat4(1.0f);
// Window coordinates are y-down while the GL viewport is y-up, so the mouse
// y must be flipped before unprojecting.
glm::vec3 screenPos = glm::vec3(rscore_mouse_x, (float)windowHeight - rscore_mouse_y, 0.1f);
glm::vec4 viewport = glm::vec4(0.0f, 0.0f, windowWidth, windowHeight);
// BUG FIX: glm::unProject expects the combined modelview matrix
// (view * model), not the model matrix alone. Passing only the identity
// model matrix ignores the camera transform entirely, which is why the
// returned coordinates were wrong.
glm::vec3 worldPos = glm::unProject(screenPos, rscore_view_matrix * rscore_model_matrix, rscore_projection_matrix, viewport);

我使用 vec3 worldPos 位置来绘制对象。

不确定这是否会对您有所帮助,但我以这种方式进行了光线拾取(计算光线的方向):

/// Builds a world-space picking-ray direction from the current mouse position.
/// The camera is placed at the origin in the view matrix below, so the
/// unprojected far-plane point is itself the ray direction; add the real
/// camera position externally to get the ray origin.
glm::vec3 CFreeCamera::CreateRay() {
    // Convert the mouse position from window coordinates to normalized
    // device coordinates: these must be in [-1, 1], not [0, width]/[0, height].
    float mouseX = getMousePositionX() / (getWindowWidth()  * 0.5f) - 1.0f;
    float mouseY = getMousePositionY() / (getWindowHeight() * 0.5f) - 1.0f;
    glm::mat4 proj = glm::perspective(FoV, AspectRatio, Near, Far);
    glm::mat4 view = glm::lookAt(glm::vec3(0.0f), CameraDirection, CameraUpVector);
    glm::mat4 invVP = glm::inverse(proj * view);
    // z = 1: a point on the far plane; window y grows downward, hence -mouseY.
    glm::vec4 screenPos = glm::vec4(mouseX, -mouseY, 1.0f, 1.0f);
    glm::vec4 worldPos = invVP * screenPos;
    // BUG FIX: complete the perspective divide before normalizing. Normalizing
    // only removes a positive scale; if worldPos.w is negative the ray
    // direction would be flipped without this division.
    worldPos /= worldPos.w;
    glm::vec3 dir = glm::normalize(glm::vec3(worldPos));
    return dir;
}
// Values you might be interested in:
glm::vec3 cameraPosition; // the camera position — supplied by you
glm::vec3 rayDirection = CFreeCamera::CreateRay();
// BUG FIX: the original declared "rayStartPositon" (typo) but referenced
// "rayStartPosition" on the next line, which would not compile.
glm::vec3 rayStartPosition = cameraPosition;
glm::vec3 rayEndPosition = rayStartPosition + rayDirection * someDistance;

解释:

当您将顶点的位置与视图和投影矩阵相乘时,您将获得裁剪空间坐标;经过透视除法和视口变换后才得到像素位置。反过来,如果将归一化设备坐标与视图和投影矩阵的逆相乘(再除以 w),则得到世界空间的位置。

虽然计算逆矩阵很昂贵,但我不确定 glm::unProject 是如何工作的,它可能会做同样的事情。

这只会给你面向世界的光线方向(你应该已经有了相机的位置)。此代码不会与对象发生"冲突"。

相机类的其余代码在这里。

更多信息可以在这里找到 - 例如。

下面你可以看到 gluUnProject 是如何工作的。这突出了您忘记使用视图矩阵而只使用模型矩阵的事实。

/*
 * Maps a window-space point (winx, winy, winz) back to object space,
 * mirroring gluUnProject. Returns 1 on success and writes the result to
 * objectCoordinate[0..2]; returns 0 if the combined matrix is singular or
 * the unprojected w is zero.
 */
int glhUnProjectf(float winx, float winy, float winz,
    float* modelview, float* projection, int* viewport, float* objectCoordinate)
{
    float inverseMVP[16];
    float mvp[16];
    float ndc[4];
    float world[4];

    /* mvp = projection * modelview — note the view matrix IS part of it. */
    MultiplyMatrices4by4OpenGL_FLOAT(mvp, projection, modelview);
    if (glhInvertMatrixf2(mvp, inverseMVP) == 0)
        return 0; /* singular matrix: cannot unproject */

    /* Map window coordinates into normalized device coordinates [-1, 1]. */
    ndc[0] = (winx - (float)viewport[0]) / (float)viewport[2] * 2.0 - 1.0;
    ndc[1] = (winy - (float)viewport[1]) / (float)viewport[3] * 2.0 - 1.0;
    ndc[2] = 2.0 * winz - 1.0;
    ndc[3] = 1.0;

    /* Transform back into object space, then undo the perspective divide. */
    MultiplyMatrixByVector4by4OpenGL_FLOAT(world, inverseMVP, ndc);
    if (world[3] == 0.0)
        return 0;
    world[3] = 1.0 / world[3];
    objectCoordinate[0] = world[0] * world[3];
    objectCoordinate[1] = world[1] * world[3];
    objectCoordinate[2] = world[2] * world[3];
    return 1;
}

代码取自此处。

此函数的 glm 实现(文档):

// glm's unProject for the zero-to-one ("ZO", e.g. Vulkan/D3D) depth range.
// Maps window coordinates `win` back to object space by inverting
// projection * model — note `model` is really the combined modelview
// matrix, so callers must pass view * model.
template<typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectZO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
{
    mat<4, 4, T, Q> Inverse = inverse(proj * model);
    vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
    // Viewport -> [0, 1]
    tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
    tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
    // [0, 1] -> NDC [-1, 1] for x/y only; in the ZO convention the depth
    // value already lives in [0, 1] and is left untouched.
    tmp.x = tmp.x * static_cast<T>(2) - static_cast<T>(1);
    tmp.y = tmp.y * static_cast<T>(2) - static_cast<T>(1);
    vec<4, T, Q> obj = Inverse * tmp;
    // Perspective divide back to a 3D point.
    obj /= obj.w;
    return vec<3, T, Q>(obj);
}
// glm's unProject for the negative-one-to-one ("NO", classic OpenGL) depth
// range. Same as unProjectZO except the depth component must also be
// remapped from [0, 1] to [-1, 1].
template<typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectNO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
{
    mat<4, 4, T, Q> Inverse = inverse(proj * model);
    vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
    // Viewport -> [0, 1]
    tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
    tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
    // [0, 1] -> NDC [-1, 1] applied component-wise; this also touches w,
    // but harmlessly: 1 * 2 - 1 == 1.
    tmp = tmp * static_cast<T>(2) - static_cast<T>(1);
    vec<4, T, Q> obj = Inverse * tmp;
    // Perspective divide back to a 3D point.
    obj /= obj.w;
    return vec<3, T, Q>(obj);
}