Sequential off-screen rendering / screen capture without windowing system using OpenSceneGraph

I am currently working on an off-screen renderer so that I can do mutual-information registration of real-world scenes. I use OpenSceneGraph to handle the large data and automatic loading. I am having trouble obtaining a framebuffer capture within a sequential, single-threaded program.

I have the following class (header):
#include <osg/ref_ptr>
#include <osg/Array>
#include <osg/ImageUtils>
#include <osgGA/StateSetManipulator>
#include <osgViewer/Viewer>
#include <osg/GraphicsContext>
#include <osg/Texture2D>
#include <osg/FrameBufferObject>
#include <osgDB/WriteFile>
#include <osg/Referenced>
#include <osg/Vec3>
#include <osg/Image>
#include <osg/State>
#include <string>
#include <chrono>
#include <thread>
#include <assert.h>
#include "ImagingPrimitives.h"
class BoundRenderScene {
public:
    BoundRenderScene();
    virtual ~BoundRenderScene();
    void NextFrame(void);
    inline OpenThreads::Mutex* GetMutexObject(void) { return &_mutex; }
    inline osg::Image* GetFrame(void)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
        return _frame.get();
    }
    inline void GetFrame(osg::Image* img)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
        if(_frame.valid() && (img!=NULL) && img->valid())
        {
            glReadBuffer(GL_BACK);
            img->readPixels(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY(), GL_RGB,GL_UNSIGNED_BYTE);
            uint w = img->s(), h = img->t(), d = img->r(), c = uint(img->getPixelSizeInBits()/8);
            /*
             * bare testing write op
             * osgDB::writeImageFile(const_cast<const osg::Image&>(*img), "/tmp/testimg.png");
             */
        }
    }
    inline void SetCameraConfiguration(CameraConfiguration* configuration) { _camera_configuration = configuration; }
    inline void SetCameraMatrix(osg::Matrixd camera_matrix) { _camera_matrix = camera_matrix; }
    inline void SetScene(osg::Node* scene) { _scene = scene; }
    inline void Initialize(void) {
        if(!_initialized)
            _init();
        else
            _re_init();
    }
protected:
    osgViewer::Viewer _viewer;
    osg::Matrixd _camera_matrix;
    osg::ref_ptr<osg::Texture2D> _tex;
    osg::ref_ptr<osg::FrameBufferObject> _fbo;
    mutable osg::ref_ptr<osg::Image> _frame;
    osg::ref_ptr<osg::Node> _scene;
    osg::ref_ptr<osg::GraphicsContext::Traits> _traits;
    osg::ref_ptr<osg::GraphicsContext> _gc;
    CameraConfiguration* _camera_configuration;
    SnapshotCallback* cb;
    std::string _filepath;
private:
    void _init(void);
    void _re_init(void);
    bool _initialized;
    mutable OpenThreads::Mutex  _mutex;
    osg::Matrixd pre_transform;
    osg::Matrixd transformation;
};

Additionally, since many off-screen rendering and screen capture examples work with Post-/FinalDrawCallbacks, I copied the callback structure from the "osgdistortion" example, but added mutex synchronization:

struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
    inline SnapshotCallback(OpenThreads::Mutex* mtx_obj, std::string filepath, int width, int height) : _filepath(filepath), _output_to_file(false), _mutex(mtx_obj)
    {
        _image = new osg::Image();
        _image->allocateImage(width, height, 1, GL_RGB, GL_UNSIGNED_BYTE);
        if(filepath!="")
            _output_to_file = true;
    }
    inline virtual void operator() (osg::RenderInfo& renderInfo) const
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
        osg::Camera* camera = renderInfo.getCurrentCamera();
        osg::Viewport* viewport = camera ? camera->getViewport() : 0;
        if(viewport && _image.valid())
        {
            glReadBuffer(GL_BACK);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*_image, _filepath);
            }
        }
    }
    inline virtual void operator() (const osg::Camera& camera) const
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
        osg::Viewport* viewport = camera.getViewport();
        if(viewport && _image.valid())
        {
            glReadBuffer(GL_BACK);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*_image, _filepath);
            }
        }
    }
    std::string _filepath;
    bool _output_to_file;
    mutable OpenThreads::Mutex*  _mutex;
    mutable osg::ref_ptr<osg::Image> _image;
};

I initialize and render the scene as follows:

#include "BoundRenderScene.h"
void BoundRenderScene::_init(void)
{
    if(_camera!=NULL)
        _viewer.setDone(true);
    _traits->x = 0;
    _traits->y = 0;
    _traits->width = _camera_configuration->GetSX();
    _traits->height = _camera_configuration->GetSY();
    _traits->red = 8;
    _traits->green = 8;
    _traits->blue = 8;
    _traits->alpha = 0;
    _traits->depth = 24;
    _traits->windowDecoration = false;
    _traits->pbuffer = true;   // request an off-screen pbuffer context (no window)
    _traits->doubleBuffer = true;
    _traits->sharedContext = 0x0;

    if(_gc.get()!=NULL)
    {
        bool release_success = _gc->releaseContext();
        if(!release_success)
            std::cerr << "Error releasing Graphics Context.";
    }
    _gc = osg::GraphicsContext::createGraphicsContext(_traits.get());
    _viewer.getCamera()->setGraphicsContext(_gc.get());
    _viewer.setThreadingModel(osgViewer::Viewer::SingleThreaded);
    _viewer.setUpThreading();
    _viewer.realize();

    _frame->allocateImage(_camera_configuration->GetSX(), _camera_configuration->GetSY(), 1, GL_RGB, GL_UNSIGNED_BYTE);
    _viewer.getCamera()->getOrCreateStateSet();
    _viewer.getCamera()->setRenderTargetImplementation(osg::Camera::PIXEL_BUFFER);
    cb = new SnapshotCallback(&_mutex,_filepath, _camera_configuration->GetSX(), _camera_configuration->GetSY());
    //_viewer.getCamera()->setPostDrawCallback( cb );
    //Clear colour "black" for representing "no information" => background elimination in natural image, pls.
    _viewer.getCamera()->setClearColor(osg::Vec4f(0.25f, 0.25f, 0.25f, 1.0f));
    _viewer.getCamera()->setClearMask(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
    _viewer.getCamera()->setDrawBuffer(GL_BACK);
    _viewer.getCamera()->setReadBuffer(GL_BACK);
    _viewer.getCamera()->setViewport(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY());
    _viewer.getCamera()->setProjectionMatrix(osg::Matrixd::perspective(osg::RadiansToDegrees(_camera_configuration->GetFoV()), _camera_configuration->GetAspectRatio(), 0.1, 150.0));
    //looking in geo-coord system
    _viewer.getCamera()->setViewMatrix(osg::Matrixd::lookAt(osg::Vec3d(0.0, 0.0, -1.0), osg::Vec3d(0.0, 0.0, 1.0), osg::Vec3d(0.0, 1.0, 0.0)));
    _viewer.getCamera()->attach(osg::Camera::COLOR_BUFFER, _frame.get()); // copy the colour buffer into _frame each frame
    _viewer.getCamera()->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
    _tex->setTextureSize(_camera_configuration->GetSX(), _camera_configuration->GetSY());
    _tex->setInternalFormat(GL_RGB);
    _tex->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR);
    _tex->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
    _tex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
    _tex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
    _tex->setResizeNonPowerOfTwoHint(false);
    _tex->setImage(0,_frame.get());
    _fbo->setAttachment(osg::Camera::COLOR_BUFFER, osg::FrameBufferAttachment(_tex.get()));
    _viewer.setDone(false);
    _viewer.setSceneData(_scene.get());
    _viewer.setCameraManipulator(0x0);
}
void BoundRenderScene::NextFrame(void)
{
    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
    if(_frame.valid() && !_viewer.done())
    {
        osg::Matrixd inverse_cam = osg::Matrixd::inverse(_camera_matrix);
        transformation = inverse_cam * pre_transform;
        _viewer.getCamera()->setViewMatrix(transformation);
        _viewer.updateTraversal();
        _viewer.frame();
    }
    else
        std::cout << "Viewer or Camera invalid." << std::endl;
}

The main workflow looks like this (simplified):

BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
 * setting initial parameters
 * fill poses with camera positions to render, for registration
 */
renderer.Initialize();
for(uint i = 0; i < poses.size(); i++)
{
    renderer.SetCameraMatrix(poses.at(i));
    renderer.NextFrame();
    std::this_thread::sleep_for(std::chrono::milliseconds(40)); // to get the 25fps frame limit
    osg::Image* reg_image = renderer.GetFrame();
    /*
     * Do further processing
     */
}

Now to the crux: the OpenSceneGraph example "osgprenderer" (included in OSG) uses an osg::Camera::DrawCallback for off-screen rendering, just like my SnapshotCallback. Unfortunately, in my case the operator() functor is never called for my scene, so this way of capturing the screen does not work for me. It is also rather inconvenient, as the rest of the mutual-information process is a fairly sequential pipeline.
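
For reference, this is roughly how such a DrawCallback is meant to be hooked into the camera. The following is only a sketch mirroring the commented-out setPostDrawCallback() line in _init() above; the helper function name is made up for illustration:

#include <osgViewer/Viewer>
#include <osg/Camera>

// Sketch: attach a DrawCallback of the kind shown above, so that its
// operator() is invoked once per frame after the camera has been drawn.
void attachCaptureCallback(osgViewer::Viewer& viewer, osg::Camera::DrawCallback* cb)
{
    // Fires right after the camera's subgraph has been rendered.
    viewer.getCamera()->setPostDrawCallback(cb);
    // Alternative hook used by several OSG examples, fired once the whole
    // rendering for this camera has finished:
    // viewer.getCamera()->setFinalDrawCallback(cb);
}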

Other wrappers (https://github.com/xarray/osgRecipes/blob/master/integrations/osgberkelium/osgberkelium.cpp) use an approach similar to my "void GetFrame(osg::Image* img)" method, where the image is read back actively via "readPixels". That would be very convenient for my workflow, but the method always returns a blank image. It does not crash, but it does not do its job either.

What does work is "osg::Image* GetFrame(void)", which returns the image bound/attached to the FBO. It is similar to the "osgdistortion" example. It does render one or two images, but after a while rendering and processing get out of sync and the application crashes, as follows:

[---FIRST FRAME---]
GraphicsCostEstimator::calibrate(..)
cull_draw() 0x1998ca0
ShaderComposer::~ShaderComposer() 0x35a4d40
Renderer::compile()
OpenGL extension 'GL_ARB_vertex_buffer_object' is supported.
OpenGL extension 'GL_EXT_secondary_color' is supported.
OpenGL extension 'GL_EXT_fog_coord' is supported.
OpenGL extension '' is not supported.
OpenGL extension 'GL_EXT_packed_depth_stencil' is supported.
Setting up osg::Camera::FRAME_BUFFER_OBJECT
end cull_draw() 0x1998ca0
[processing]
[   SECOND FRAME   ]
cull_draw() 0x1998ca0
OpenGL extension 'GL_ARB_fragment_program' is supported.
OpenGL extension 'GL_ARB_vertex_program' is supported.
OpenGL extension 'GL_ARB_shader_objects' is supported.
OpenGL extension 'GL_ARB_vertex_shader' is supported.
OpenGL extension 'GL_ARB_fragment_shader' is supported.
OpenGL extension 'GL_ARB_shading_language_100' is supported.
OpenGL extension 'GL_EXT_geometry_shader4' is supported.
OpenGL extension 'GL_EXT_gpu_shader4' is supported.
OpenGL extension 'GL_ARB_tessellation_shader' is supported.
OpenGL extension 'GL_ARB_uniform_buffer_object' is supported.
OpenGL extension 'GL_ARB_get_program_binary' is supported.
OpenGL extension 'GL_ARB_gpu_shader_fp64' is supported.
OpenGL extension 'GL_ARB_shader_atomic_counters' is supported.
glVersion=4.5, isGlslSupported=YES, glslLanguageVersion=4.5
Warning: detected OpenGL error 'invalid operation' at end of SceneView::draw()
end cull_draw() 0x1998ca0
[-FROM 3rd FRAME ONWARDS-]
[workload, matrix setup]
[_viewer.frame()]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[next frame]
[BREAKING]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[more work]
Segmentation fault (core dumped)

So, the questions are:

  • I looked through the source files of the viewer-related classes from OSG, but I could not
    determine where the error

    Warning: detected OpenGL error 'invalid operation' at start of State::apply()

    originates. Any idea where to start looking?

  • Which approach is the best one to use in OSG for sequential rendering and screen capture?

  • How do I get hold of the mutex of the regular osgViewer::Viewer, so that I can synchronize the renderer with the rest of my pipeline? (The renderer is single-threaded.)
  • Any other recommendations, from experience, for OpenSceneGraph off-screen renderers and screen capture?

Deeper investigation showed that releasing the graphics context in the class destructor frees the OpenGL pipeline, BUT: it also deallocates the state-bound textures of the loaded scene/model, even though the model itself is not discarded (as intended in the question: it is reused in the following passes). So in subsequent render passes, the rendering pipeline tries to access OSG resources that were deallocated by releasing the GL context.

Changing the code in the destructor from:

BoundRenderScene::~BoundRenderScene() {
    // TODO Auto-generated destructor stub
    _viewer.setDone(true);
    _viewer.setReleaseContextAtEndOfFrameHint(true);
    _gc->releaseContext();
#ifdef DEBUG
    std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}

to:

BoundRenderScene::~BoundRenderScene() {
    // TODO Auto-generated destructor stub
    _viewer.setDone(true);
    _viewer.setReleaseContextAtEndOfFrameHint(true);
#ifdef DEBUG
    std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}

solved the OpenSceneGraph-internal error messages. Now, to solve the frame capture problem itself, I implemented the callback from osgprenderer:

struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
    inline SnapshotCallback(std::string filepath) : _filepath(filepath), _output_to_file(false), _image(NULL)
    {
        if(filepath!="")
            _output_to_file = true;
        _image = new osg::Image();
    }
    inline virtual void operator() (osg::RenderInfo& renderInfo) const
    {
        osg::Camera* camera = renderInfo.getCurrentCamera();
        osg::Viewport* viewport = camera ? camera->getViewport() : 0;
        if(viewport)
        {
            glReadBuffer(camera->getDrawBuffer());
            _image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
            }
        }
    }
    inline virtual void operator() (const osg::Camera& camera) const
    {
        osg::Viewport* viewport = camera.getViewport();
        if(viewport)
        {
            glReadBuffer(camera.getDrawBuffer());
            _image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
            }
        }
    }
    inline osg::Image* GetImage(void)
    {
        return reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL));
    }
protected:
    std::string _filepath;
    bool _output_to_file;
    mutable osg::ref_ptr<osg::Image> _image;
};

Now, with a cloned buffer instead of the actual image buffer (an idea borrowed from the osgscreencapture example), I do get the real images without memory errors.
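
For completeness, a minimal sketch of one render-and-capture step with this callback; the helper name is made up, and it assumes the callback has already been attached as the camera's post or final draw callback (as in the commented-out line of _init()):

#include <osgViewer/Viewer>
#include <osg/Image>
#include <osg/ref_ptr>

// Sketch: render one frame and fetch the deep-copied capture afterwards.
// Assumes: viewer.getCamera()->setFinalDrawCallback(snap) has been called.
osg::ref_ptr<osg::Image> renderAndCapture(osgViewer::Viewer& viewer, SnapshotCallback* snap)
{
    viewer.frame();           // draw one frame; the callback reads back the pixels
    return snap->GetImage();  // deep copy, safe to keep while further frames are rendered
}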

For double-buffered rendering, though, I somehow have to render the scene twice so that the correct buffer contains the image of the objects, but that is currently a minor issue for my use case (the rendering is I/O bound, not operation bound).

So, the main function now looks like this:

BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
 * setting initial parameters
 * fill poses with camera positions to render, for registration
 */
renderer.Initialize();
for(uint i = 0; i < poses.size(); i++)
{
    renderer.SetCameraMatrix(poses.at(i));
    renderer.NextFrame();
    renderer.NextFrame();
    osg::Image* reg_image = renderer.GetFrame();
    /*
     * Do further processing
     */
}