// opencv/modules/core/src/opengl_interop.cpp

/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/core/opengl_interop.hpp"
#include "opencv2/core/gpumat.hpp"
#ifdef HAVE_OPENGL
#include "gl_core_3_1.hpp"
#ifdef HAVE_CUDA
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#endif
#endif
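// Build-configuration error helpers. When the library is built without OpenGL
// (or without CUDA) the corresponding throw_* helper reports that the feature
// is unavailable; cudaSafeCall() turns a failed CUDA runtime call into a
// cv::gpu::error carrying file/line (and, with GCC, function) information.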
namespace
{
#ifndef HAVE_OPENGL
void throw_nogl() { CV_Error(CV_OpenGlNotSupported, "The library is compiled without OpenGL support"); }
#else
void throw_nogl() { CV_Error(CV_OpenGlApiCallError, "OpenGL context doesn't exist"); }
#ifndef HAVE_CUDA
void throw_nocuda() { CV_Error(CV_GpuNotSupported, "The library is compiled without GPU support"); }
#else
void throw_nocuda() { CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform"); }
#if defined(__GNUC__)
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
#endif
void ___cudaSafeCall(cudaError_t err, const char* file, const int line, const char* func = "")
{
if (cudaSuccess != err)
cv::gpu::error(cudaGetErrorString(err), file, line, func);
}
#endif
#endif
}
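// Translates the last pending OpenGL error (if any) into a human-readable
// message and reports it through cvError(); returns false when an error was
// pending, true otherwise (and always true in builds without OpenGL).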
bool cv::checkGlError(const char* file, const int line, const char* func)
{
#ifndef HAVE_OPENGL
(void) file;
(void) line;
(void) func;
return true;
#else
GLenum err = gl::GetError();
if (err != gl::NO_ERROR_)
{
const char* msg;
switch (err)
{
case gl::INVALID_ENUM:
msg = "An unacceptable value is specified for an enumerated argument";
break;
case gl::INVALID_VALUE:
msg = "A numeric argument is out of range";
break;
case gl::INVALID_OPERATION:
msg = "The specified operation is not allowed in the current state";
break;
case gl::OUT_OF_MEMORY:
msg = "There is not enough memory left to execute the command";
break;
default:
msg = "Unknown error";
}
cvError(CV_OpenGlApiCallError, func, msg, file, line);
return false;
}
return true;
#endif
}
#ifdef HAVE_OPENGL
namespace
{
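// Maps OpenCV depth constants (CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F,
// CV_64F) to the corresponding OpenGL data types; indexed by CV_MAT_DEPTH.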
const GLenum gl_types[] = { gl::UNSIGNED_BYTE, gl::BYTE, gl::UNSIGNED_SHORT, gl::SHORT, gl::INT, gl::FLOAT, gl::DOUBLE };
}
#endif
////////////////////////////////////////////////////////////////////////
// setGlDevice
void cv::gpu::setGlDevice(int device)
{
#ifndef HAVE_OPENGL
(void) device;
throw_nogl();
#else
#if !defined(HAVE_CUDA) || defined(CUDA_DISABLER)
(void) device;
throw_nocuda();
#else
cudaSafeCall( cudaGLSetGLDevice(device) );
#endif
#endif
}
////////////////////////////////////////////////////////////////////////
// CudaResource
#if defined(HAVE_OPENGL) && defined(HAVE_CUDA) && !defined(CUDA_DISABLER)
namespace
{
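// RAII wrapper around a cudaGraphicsResource_t registered for an OpenGL
// buffer object. It hides the register/map/unmap protocol needed to copy
// between CUDA device memory and the buffer, or to expose the buffer as a
// raw device pointer.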
class CudaResource
{
public:
CudaResource();
~CudaResource();
void registerBuffer(GLuint buffer);
void release();
void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0);
void* map(cudaStream_t stream = 0);
void unmap(cudaStream_t stream = 0);
private:
cudaGraphicsResource_t resource_;
GLuint buffer_;
class GraphicsMapHolder;
};
CudaResource::CudaResource() : resource_(0), buffer_(0)
{
}
CudaResource::~CudaResource()
{
release();
}
void CudaResource::registerBuffer(GLuint buffer)
{
CV_DbgAssert( buffer != 0 );
if (buffer_ == buffer)
return;
cudaGraphicsResource_t resource;
cudaSafeCall( cudaGraphicsGLRegisterBuffer(&resource, buffer, cudaGraphicsMapFlagsNone) );
release();
resource_ = resource;
buffer_ = buffer;
}
void CudaResource::release()
{
if (resource_)
cudaGraphicsUnregisterResource(resource_);
resource_ = 0;
buffer_ = 0;
}
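// Scoped map/unmap guard: maps the resource in the constructor and unmaps it
// in the destructor. reset() drops ownership so the resource stays mapped,
// which map() relies on to hand the pointer back to the caller until unmap().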
class CudaResource::GraphicsMapHolder
{
public:
GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream);
~GraphicsMapHolder();
void reset();
private:
cudaGraphicsResource_t* resource_;
cudaStream_t stream_;
};
CudaResource::GraphicsMapHolder::GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream) : resource_(resource), stream_(stream)
{
if (resource_)
cudaSafeCall( cudaGraphicsMapResources(1, resource_, stream_) );
}
CudaResource::GraphicsMapHolder::~GraphicsMapHolder()
{
if (resource_)
cudaGraphicsUnmapResources(1, resource_, stream_);
}
void CudaResource::GraphicsMapHolder::reset()
{
resource_ = 0;
}
void CudaResource::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
{
CV_DbgAssert( resource_ != 0 );
GraphicsMapHolder h(&resource_, stream);
(void) h;
void* dst;
size_t size;
cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&dst, &size, resource_) );
CV_DbgAssert( width * height == size );
if (stream == 0)
cudaSafeCall( cudaMemcpy2D(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice) );
else
cudaSafeCall( cudaMemcpy2DAsync(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice, stream) );
}
void CudaResource::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream)
{
CV_DbgAssert( resource_ != 0 );
GraphicsMapHolder h(&resource_, stream);
(void) h;
void* src;
size_t size;
cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&src, &size, resource_) );
CV_DbgAssert( width * height == size );
if (stream == 0)
cudaSafeCall( cudaMemcpy2D(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice) );
else
cudaSafeCall( cudaMemcpy2DAsync(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice, stream) );
}
void* CudaResource::map(cudaStream_t stream)
{
CV_DbgAssert( resource_ != 0 );
GraphicsMapHolder h(&resource_, stream);
void* ptr;
size_t size;
cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&ptr, &size, resource_) );
h.reset();
return ptr;
}
void CudaResource::unmap(cudaStream_t stream)
{
CV_Assert( resource_ != 0 );
cudaGraphicsUnmapResources(1, &resource_, stream);
}
}
#endif
////////////////////////////////////////////////////////////////////////
// GlBuffer
#ifndef HAVE_OPENGL
class cv::GlBuffer::Impl
{
};
#else
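// GlBuffer::Impl owns the OpenGL buffer object. It can wrap an existing
// buffer id or create a new one, copies data to and from host memory through
// the COPY_READ/COPY_WRITE binding points, and (with CUDA) shares the buffer
// with CUDA through a CudaResource. autoRelease_ controls whether the
// destructor deletes the buffer object.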
class cv::GlBuffer::Impl
{
public:
static const Ptr<Impl>& empty();
Impl(GLuint bufId, bool autoRelease);
Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease);
~Impl();
void bind(GLenum target) const;
void copyFrom(GLuint srcBuf, GLsizeiptr size);
void copyFrom(GLsizeiptr size, const GLvoid* data);
void copyTo(GLsizeiptr size, GLvoid* data) const;
void* mapHost(GLenum access);
void unmapHost();
#ifdef HAVE_CUDA
void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0) const;
void* mapDevice(cudaStream_t stream = 0);
void unmapDevice(cudaStream_t stream = 0);
#endif
void setAutoRelease(bool flag) { autoRelease_ = flag; }
GLuint bufId() const { return bufId_; }
private:
Impl();
GLuint bufId_;
bool autoRelease_;
#ifdef HAVE_CUDA
mutable CudaResource cudaResource_;
#endif
};
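// Shared "null" Impl (bufId 0) used for default-constructed and released
// buffers, so the public class never has to check for a null pointer.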
const cv::Ptr<cv::GlBuffer::Impl>& cv::GlBuffer::Impl::empty()
{
static Ptr<Impl> p(new Impl);
return p;
}
cv::GlBuffer::Impl::Impl() : bufId_(0), autoRelease_(true)
{
}
cv::GlBuffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease)
{
}
cv::GlBuffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease)
{
gl::GenBuffers(1, &bufId_);
CV_CheckGlError();
CV_Assert( bufId_ != 0 );
gl::BindBuffer(target, bufId_);
CV_CheckGlError();
gl::BufferData(target, size, data, gl::DYNAMIC_DRAW);
CV_CheckGlError();
gl::BindBuffer(target, 0);
CV_CheckGlError();
}
cv::GlBuffer::Impl::~Impl()
{
if (autoRelease_ && bufId_)
gl::DeleteBuffers(1, &bufId_);
}
void cv::GlBuffer::Impl::bind(GLenum target) const
{
gl::BindBuffer(target, bufId_);
CV_CheckGlError();
}
void cv::GlBuffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
{
gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
CV_CheckGlError();
gl::BindBuffer(gl::COPY_READ_BUFFER, srcBuf);
CV_CheckGlError();
gl::CopyBufferSubData(gl::COPY_READ_BUFFER, gl::COPY_WRITE_BUFFER, 0, 0, size);
CV_CheckGlError();
}
void cv::GlBuffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
{
gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
CV_CheckGlError();
gl::BufferSubData(gl::COPY_WRITE_BUFFER, 0, size, data);
CV_CheckGlError();
}
void cv::GlBuffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
{
gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
CV_CheckGlError();
gl::GetBufferSubData(gl::COPY_READ_BUFFER, 0, size, data);
CV_CheckGlError();
}
void* cv::GlBuffer::Impl::mapHost(GLenum access)
{
gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
CV_CheckGlError();
GLvoid* data = gl::MapBuffer(gl::COPY_READ_BUFFER, access);
CV_CheckGlError();
return data;
}
void cv::GlBuffer::Impl::unmapHost()
{
gl::UnmapBuffer(gl::COPY_READ_BUFFER);
}
#ifdef HAVE_CUDA
void cv::GlBuffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
{
cudaResource_.registerBuffer(bufId_);
cudaResource_.copyFrom(src, spitch, width, height, stream);
}
void cv::GlBuffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const
{
cudaResource_.registerBuffer(bufId_);
cudaResource_.copyTo(dst, dpitch, width, height, stream);
}
void* cv::GlBuffer::Impl::mapDevice(cudaStream_t stream)
{
cudaResource_.registerBuffer(bufId_);
return cudaResource_.map(stream);
}
void cv::GlBuffer::Impl::unmapDevice(cudaStream_t stream)
{
cudaResource_.unmap(stream);
}
#endif
#endif // HAVE_OPENGL
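// Public cv::GlBuffer interface. Typical usage (a sketch only, assuming a
// current OpenGL context has already been created):
//
//     cv::Mat mat = ...;                                  // host data
//     cv::GlBuffer buf(mat, cv::GlBuffer::ARRAY_BUFFER);  // upload to a GL buffer
//     buf.bind(cv::GlBuffer::ARRAY_BUFFER);
//     // ... issue draw calls ...
//     cv::GlBuffer::unbind(cv::GlBuffer::ARRAY_BUFFER);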
cv::GlBuffer::GlBuffer() : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
throw_nogl();
#else
impl_ = Impl::empty();
#endif
}
cv::GlBuffer::GlBuffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
(void) arows;
(void) acols;
(void) atype;
(void) abufId;
(void) autoRelease;
throw_nogl();
#else
impl_ = new Impl(abufId, autoRelease);
rows_ = arows;
cols_ = acols;
type_ = atype;
#endif
}
cv::GlBuffer::GlBuffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
(void) asize;
(void) atype;
(void) abufId;
(void) autoRelease;
throw_nogl();
#else
impl_ = new Impl(abufId, autoRelease);
rows_ = asize.height;
cols_ = asize.width;
type_ = atype;
#endif
}
cv::GlBuffer::GlBuffer(int arows, int acols, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
create(arows, acols, atype, target, autoRelease);
}
cv::GlBuffer::GlBuffer(Size asize, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
create(asize, atype, target, autoRelease);
}
cv::GlBuffer::GlBuffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
(void) arr;
(void) target;
(void) autoRelease;
throw_nogl();
#else
const int kind = arr.kind();
switch (kind)
{
case _InputArray::OPENGL_BUFFER:
{
copyFrom(arr, target, autoRelease);
break;
}
case _InputArray::OPENGL_TEXTURE2D:
{
copyFrom(arr, target, autoRelease);
break;
}
case _InputArray::GPU_MAT:
{
copyFrom(arr, target, autoRelease);
break;
}
default:
{
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
const GLsizeiptr asize = mat.rows * mat.cols * mat.elemSize();
impl_ = new Impl(asize, mat.data, target, autoRelease);
rows_ = mat.rows;
cols_ = mat.cols;
type_ = mat.type();
break;
}
}
#endif
}
void cv::GlBuffer::create(int arows, int acols, int atype, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arows;
(void) acols;
(void) atype;
(void) target;
(void) autoRelease;
throw_nogl();
#else
if (rows_ != arows || cols_ != acols || type_ != atype)
{
const GLsizeiptr asize = arows * acols * CV_ELEM_SIZE(atype);
impl_ = new Impl(asize, 0, target, autoRelease);
rows_ = arows;
cols_ = acols;
type_ = atype;
}
#endif
}
void cv::GlBuffer::release()
{
#ifdef HAVE_OPENGL
if (*impl_.refcount == 1)
impl_->setAutoRelease(true);
impl_ = Impl::empty();
rows_ = 0;
cols_ = 0;
type_ = 0;
#endif
}
void cv::GlBuffer::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
(void) flag;
throw_nogl();
#else
impl_->setAutoRelease(flag);
#endif
}
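// copyFrom() dispatches on the InputArray kind: another GlBuffer is copied
// GPU-side via glCopyBufferSubData, a GlTexture2D is read back through the
// texture's copyTo(), a GpuMat is copied through CUDA interop, and anything
// else is treated as host memory (cv::Mat).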
void cv::GlBuffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arr;
(void) target;
(void) autoRelease;
throw_nogl();
#else
const int kind = arr.kind();
if (kind == _InputArray::OPENGL_TEXTURE2D)
{
GlTexture2D tex = arr.getGlTexture2D();
tex.copyTo(*this);
setAutoRelease(autoRelease);
return;
}
const Size asize = arr.size();
const int atype = arr.type();
create(asize, atype, target, autoRelease);
switch (kind)
{
case _InputArray::OPENGL_BUFFER:
{
GlBuffer buf = arr.getGlBuffer();
impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype));
break;
}
case _InputArray::GPU_MAT:
{
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
#else
GpuMat dmat = arr.getGpuMat();
impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
#endif
break;
}
default:
{
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
impl_->copyFrom(asize.area() * CV_ELEM_SIZE(atype), mat.data);
}
}
#endif
}
void cv::GlBuffer::copyTo(OutputArray arr, Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
(void) arr;
(void) target;
(void) autoRelease;
throw_nogl();
#else
const int kind = arr.kind();
switch (kind)
{
case _InputArray::OPENGL_BUFFER:
{
arr.getGlBufferRef().copyFrom(*this, target, autoRelease);
break;
}
case _InputArray::OPENGL_TEXTURE2D:
{
arr.getGlTexture2DRef().copyFrom(*this, autoRelease);
break;
}
case _InputArray::GPU_MAT:
{
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
#else
GpuMat& dmat = arr.getGpuMatRef();
dmat.create(rows_, cols_, type_);
impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
#endif
break;
}
default:
{
arr.create(rows_, cols_, type_);
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
impl_->copyTo(mat.rows * mat.cols * mat.elemSize(), mat.data);
}
}
#endif
}
cv::GlBuffer cv::GlBuffer::clone(Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
(void) target;
(void) autoRelease;
throw_nogl();
return GlBuffer();
#else
GlBuffer buf;
buf.copyFrom(*this, target, autoRelease);
return buf;
#endif
}
void cv::GlBuffer::bind(Target target) const
{
#ifndef HAVE_OPENGL
(void) target;
throw_nogl();
#else
impl_->bind(target);
#endif
}
void cv::GlBuffer::unbind(Target target)
{
#ifndef HAVE_OPENGL
(void) target;
throw_nogl();
#else
gl::BindBuffer(target, 0);
CV_CheckGlError();
#endif
}
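// mapHost()/mapDevice() expose the buffer contents without copying: mapHost()
// returns a cv::Mat header over the mapped pointer (pair with unmapHost()),
// and mapDevice() returns a GpuMat header over the CUDA-mapped pointer (pair
// with unmapDevice()). The headers are only valid until the matching unmap.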
cv::Mat cv::GlBuffer::mapHost(Access access)
{
#ifndef HAVE_OPENGL
(void) access;
throw_nogl();
return cv::Mat();
#else
return cv::Mat(rows_, cols_, type_, impl_->mapHost(access));
#endif
}
void cv::GlBuffer::unmapHost()
{
#ifndef HAVE_OPENGL
throw_nogl();
#else
return impl_->unmapHost();
#endif
}
cv::gpu::GpuMat cv::GlBuffer::mapDevice()
{
#ifndef HAVE_OPENGL
throw_nogl();
return cv::gpu::GpuMat();
#else
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
return cv::gpu::GpuMat();
#else
return cv::gpu::GpuMat(rows_, cols_, type_, impl_->mapDevice());
#endif
#endif
}
void cv::GlBuffer::unmapDevice()
{
#ifndef HAVE_OPENGL
throw_nogl();
#else
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
#else
impl_->unmapDevice();
#endif
#endif
}
unsigned int cv::GlBuffer::bufId() const
{
#ifndef HAVE_OPENGL
throw_nogl();
return 0;
#else
return impl_->bufId();
#endif
}
template <> void cv::Ptr<cv::GlBuffer::Impl>::delete_obj()
{
if (obj) delete obj;
}
//////////////////////////////////////////////////////////////////////////////////////////
// GlTexture2D
#ifndef HAVE_OPENGL
class cv::GlTexture2D::Impl
{
};
#else
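// GlTexture2D::Impl owns the OpenGL texture object: it can wrap an existing
// texture id or create a 2D texture with glTexImage2D, and copies pixel data
// in and out with glTexSubImage2D / glGetTexImage.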
class cv::GlTexture2D::Impl
{
public:
static const Ptr<Impl> empty();
Impl(GLuint texId, bool autoRelease);
Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease);
~Impl();
void copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels);
void copyTo(GLenum format, GLenum type, GLvoid* pixels) const;
void bind() const;
void setAutoRelease(bool flag) { autoRelease_ = flag; }
GLuint texId() const { return texId_; }
private:
Impl();
GLuint texId_;
bool autoRelease_;
};
const cv::Ptr<cv::GlTexture2D::Impl> cv::GlTexture2D::Impl::empty()
{
static Ptr<Impl> p(new Impl);
return p;
}
cv::GlTexture2D::Impl::Impl() : texId_(0), autoRelease_(true)
{
}
cv::GlTexture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease)
{
}
cv::GlTexture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease)
{
gl::GenTextures(1, &texId_);
CV_CheckGlError();
CV_Assert(texId_ != 0);
gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError();
gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
CV_CheckGlError();
gl::TexImage2D(gl::TEXTURE_2D, 0, internalFormat, width, height, 0, format, type, pixels);
CV_CheckGlError();
gl::GenerateMipmap(gl::TEXTURE_2D);
CV_CheckGlError();
}
cv::GlTexture2D::Impl::~Impl()
{
if (autoRelease_ && texId_)
gl::DeleteTextures(1, &texId_);
}
void cv::GlTexture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels)
{
gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError();
gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
CV_CheckGlError();
gl::TexSubImage2D(gl::TEXTURE_2D, 0, 0, 0, width, height, format, type, pixels);
CV_CheckGlError();
gl::GenerateMipmap(gl::TEXTURE_2D);
CV_CheckGlError();
}
void cv::GlTexture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const
{
gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
CV_CheckGlError();
gl::GetTexImage(gl::TEXTURE_2D, 0, format, type, pixels);
CV_CheckGlError();
}
void cv::GlTexture2D::Impl::bind() const
{
gl::BindTexture(gl::TEXTURE_2D, texId_);
CV_CheckGlError();
}
#endif // HAVE_OPENGL
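// Public cv::GlTexture2D interface. A texture is usually built from a Mat,
// GlBuffer or GpuMat and drawn with cv::render(), e.g. (a sketch only,
// assuming a current OpenGL context):
//
//     cv::GlTexture2D tex(img, false);  // img: continuous 1-, 3- or 4-channel Mat
//     cv::render(tex, cv::Rect_<double>(0, 0, 1, 1), cv::Rect_<double>(0, 0, 1, 1));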
cv::GlTexture2D::GlTexture2D() : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
throw_nogl();
#else
impl_ = Impl::empty();
#endif
}
cv::GlTexture2D::GlTexture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
(void) arows;
(void) acols;
(void) aformat;
(void) atexId;
(void) autoRelease;
throw_nogl();
#else
impl_ = new Impl(atexId, autoRelease);
rows_ = arows;
cols_ = acols;
format_ = aformat;
#endif
}
cv::GlTexture2D::GlTexture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
(void) asize;
(void) aformat;
(void) atexId;
(void) autoRelease;
throw_nogl();
#else
impl_ = new Impl(atexId, autoRelease);
rows_ = asize.height;
cols_ = asize.width;
format_ = aformat;
#endif
}
cv::GlTexture2D::GlTexture2D(int arows, int acols, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
create(arows, acols, aformat, autoRelease);
}
cv::GlTexture2D::GlTexture2D(Size asize, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
create(asize, aformat, autoRelease);
}
cv::GlTexture2D::GlTexture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
(void) arr;
(void) autoRelease;
throw_nogl();
#else
const int kind = arr.kind();
const Size asize = arr.size();
const int atype = arr.type();
const int depth = CV_MAT_DEPTH(atype);
const int cn = CV_MAT_CN(atype);
CV_Assert( depth <= CV_32F );
CV_Assert( cn == 1 || cn == 3 || cn == 4 );
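// Both tables below are indexed by channel count: single-channel input becomes
// a depth-component texture, 3- and 4-channel input becomes RGB/RGBA, read
// from the BGR/BGRA source ordering used by cv::Mat.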
const Format internalFormats[] =
{
NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
};
const GLenum srcFormats[] =
{
0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
};
switch (kind)
{
case _InputArray::OPENGL_BUFFER:
{
GlBuffer buf = arr.getGlBuffer();
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
break;
}
case _InputArray::GPU_MAT:
{
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
#else
GpuMat dmat = arr.getGpuMat();
GlBuffer buf(dmat, GlBuffer::PIXEL_UNPACK_BUFFER);
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
#endif
break;
}
default:
{
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease);
break;
}
}
rows_ = asize.height;
cols_ = asize.width;
format_ = internalFormats[cn];
#endif
}
void cv::GlTexture2D::create(int arows, int acols, Format aformat, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arows;
(void) acols;
(void) aformat;
(void) autoRelease;
throw_nogl();
#else
if (rows_ != arows || cols_ != acols || format_ != aformat)
{
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
impl_ = new Impl(aformat, acols, arows, aformat, gl::FLOAT, 0, autoRelease);
rows_ = arows;
cols_ = acols;
format_ = aformat;
}
#endif
}
void cv::GlTexture2D::release()
{
#ifdef HAVE_OPENGL
if (*impl_.refcount == 1)
impl_->setAutoRelease(true);
impl_ = Impl::empty();
rows_ = 0;
cols_ = 0;
format_ = NONE;
#endif
}
void cv::GlTexture2D::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
(void) flag;
throw_nogl();
#else
impl_->setAutoRelease(flag);
#endif
}
void cv::GlTexture2D::copyFrom(InputArray arr, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arr;
(void) autoRelease;
throw_nogl();
#else
const int kind = arr.kind();
const Size asize = arr.size();
const int atype = arr.type();
const int depth = CV_MAT_DEPTH(atype);
const int cn = CV_MAT_CN(atype);
CV_Assert( depth <= CV_32F );
CV_Assert( cn == 1 || cn == 3 || cn == 4 );
const Format internalFormats[] =
{
NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
};
const GLenum srcFormats[] =
{
0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
};
create(asize, internalFormats[cn], autoRelease);
switch(kind)
{
case _InputArray::OPENGL_BUFFER:
{
GlBuffer buf = arr.getGlBuffer();
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
break;
}
case _InputArray::GPU_MAT:
{
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
#else
GpuMat dmat = arr.getGpuMat();
GlBuffer buf(dmat, GlBuffer::PIXEL_UNPACK_BUFFER);
buf.bind(GlBuffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
#endif
break;
}
default:
{
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_UNPACK_BUFFER);
impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data);
}
}
#endif
}
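// copyTo() reads the texture back at the requested depth: into another
// GlBuffer via a PIXEL_PACK_BUFFER transfer, into a GpuMat through a temporary
// pack buffer, or directly into host memory (cv::Mat).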
void cv::GlTexture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const
{
#ifndef HAVE_OPENGL
(void) arr;
(void) ddepth;
(void) autoRelease;
throw_nogl();
#else
const int kind = arr.kind();
const int cn = format_ == DEPTH_COMPONENT ? 1 : format_ == RGB ? 3 : 4;
const GLenum dstFormat = format_ == DEPTH_COMPONENT ? gl::DEPTH_COMPONENT : format_ == RGB ? gl::BGR : gl::BGRA;
switch(kind)
{
case _InputArray::OPENGL_BUFFER:
{
GlBuffer& buf = arr.getGlBufferRef();
buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), GlBuffer::PIXEL_PACK_BUFFER, autoRelease);
buf.bind(GlBuffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER);
break;
}
case _InputArray::GPU_MAT:
{
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
throw_nocuda();
#else
GlBuffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), GlBuffer::PIXEL_PACK_BUFFER);
buf.bind(GlBuffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], 0);
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER);
buf.copyTo(arr);
#endif
break;
}
default:
{
arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn));
Mat mat = arr.getMat();
CV_Assert( mat.isContinuous() );
GlBuffer::unbind(GlBuffer::PIXEL_PACK_BUFFER);
impl_->copyTo(dstFormat, gl_types[ddepth], mat.data);
}
}
#endif
}
void cv::GlTexture2D::bind() const
{
#ifndef HAVE_OPENGL
throw_nogl();
#else
impl_->bind();
#endif
}
unsigned int cv::GlTexture2D::texId() const
{
#ifndef HAVE_OPENGL
throw_nogl();
return 0;
#else
return impl_->texId();
#endif
}
template <> void cv::Ptr<cv::GlTexture2D::Impl>::delete_obj()
{
if (obj) delete obj;
}
////////////////////////////////////////////////////////////////////////
// GlArrays
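// GlArrays bundles the vertex, color, normal and texture-coordinate buffers
// of a point set and binds them as OpenGL client-side arrays for rendering.
// All non-empty arrays must describe the same number of elements.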
cv::GlArrays::GlArrays() : size_(0)
{
}
void cv::GlArrays::setVertexArray(InputArray vertex)
{
const int cn = vertex.channels();
const int depth = vertex.depth();
CV_Assert( cn == 2 || cn == 3 || cn == 4 );
CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (vertex.kind() == _InputArray::OPENGL_BUFFER)
vertex_ = vertex.getGlBuffer();
else
vertex_.copyFrom(vertex);
size_ = vertex_.size().area();
}
void cv::GlArrays::resetVertexArray()
{
vertex_.release();
size_ = 0;
}
void cv::GlArrays::setColorArray(InputArray color)
{
const int cn = color.channels();
CV_Assert( cn == 3 || cn == 4 );
if (color.kind() == _InputArray::OPENGL_BUFFER)
color_ = color.getGlBuffer();
else
color_.copyFrom(color);
}
void cv::GlArrays::resetColorArray()
{
color_.release();
}
void cv::GlArrays::setNormalArray(InputArray normal)
{
const int cn = normal.channels();
const int depth = normal.depth();
CV_Assert( cn == 3 );
CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (normal.kind() == _InputArray::OPENGL_BUFFER)
normal_ = normal.getGlBuffer();
else
normal_.copyFrom(normal);
}
void cv::GlArrays::resetNormalArray()
{
normal_.release();
}
void cv::GlArrays::setTexCoordArray(InputArray texCoord)
{
const int cn = texCoord.channels();
const int depth = texCoord.depth();
CV_Assert( cn >= 1 && cn <= 4 );
CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );
if (texCoord.kind() == _InputArray::OPENGL_BUFFER)
texCoord_ = texCoord.getGlBuffer();
else
texCoord_.copyFrom(texCoord);
}
void cv::GlArrays::resetTexCoordArray()
{
texCoord_.release();
}
void cv::GlArrays::release()
{
resetVertexArray();
resetColorArray();
resetNormalArray();
resetTexCoordArray();
}
void cv::GlArrays::setAutoRelease(bool flag)
{
vertex_.setAutoRelease(flag);
color_.setAutoRelease(flag);
normal_.setAutoRelease(flag);
texCoord_.setAutoRelease(flag);
}
void cv::GlArrays::bind() const
{
#ifndef HAVE_OPENGL
throw_nogl();
#else
CV_Assert( texCoord_.empty() || texCoord_.size().area() == size_ );
CV_Assert( normal_.empty() || normal_.size().area() == size_ );
CV_Assert( color_.empty() || color_.size().area() == size_ );
if (texCoord_.empty())
{
gl::DisableClientState(gl::TEXTURE_COORD_ARRAY);
CV_CheckGlError();
}
else
{
gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
CV_CheckGlError();
texCoord_.bind(GlBuffer::ARRAY_BUFFER);
gl::TexCoordPointer(texCoord_.channels(), gl_types[texCoord_.depth()], 0, 0);
CV_CheckGlError();
}
if (normal_.empty())
{
gl::DisableClientState(gl::NORMAL_ARRAY);
CV_CheckGlError();
}
else
{
gl::EnableClientState(gl::NORMAL_ARRAY);
CV_CheckGlError();
normal_.bind(GlBuffer::ARRAY_BUFFER);
gl::NormalPointer(gl_types[normal_.depth()], 0, 0);
CV_CheckGlError();
}
if (color_.empty())
{
gl::DisableClientState(gl::COLOR_ARRAY);
CV_CheckGlError();
}
else
{
gl::EnableClientState(gl::COLOR_ARRAY);
CV_CheckGlError();
color_.bind(GlBuffer::ARRAY_BUFFER);
const int cn = color_.channels();
gl::ColorPointer(cn, gl_types[color_.depth()], 0, 0);
CV_CheckGlError();
}
if (vertex_.empty())
{
gl::DisableClientState(gl::VERTEX_ARRAY);
CV_CheckGlError();
}
else
{
gl::EnableClientState(gl::VERTEX_ARRAY);
CV_CheckGlError();
vertex_.bind(GlBuffer::ARRAY_BUFFER);
gl::VertexPointer(vertex_.channels(), gl_types[vertex_.depth()], 0, 0);
CV_CheckGlError();
}
GlBuffer::unbind(GlBuffer::ARRAY_BUFFER);
#endif
}
////////////////////////////////////////////////////////////////////////
// Rendering
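// render() overloads: draw a texture as a screen-aligned quad, draw a GlArrays
// point set with an optional constant color, or draw it indexed (indices taken
// from a GlBuffer or a host cv::Mat).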
void cv::render(const GlTexture2D& tex, Rect_<double> wndRect, Rect_<double> texRect)
{
#ifndef HAVE_OPENGL
(void) tex;
(void) wndRect;
(void) texRect;
throw_nogl();
#else
if (!tex.empty())
{
gl::MatrixMode(gl::PROJECTION);
gl::LoadIdentity();
gl::Ortho(0.0, 1.0, 1.0, 0.0, -1.0, 1.0);
CV_CheckGlError();
gl::MatrixMode(gl::MODELVIEW);
gl::LoadIdentity();
CV_CheckGlError();
gl::Disable(gl::LIGHTING);
CV_CheckGlError();
tex.bind();
gl::Enable(gl::TEXTURE_2D);
CV_CheckGlError();
gl::TexEnvi(gl::TEXTURE_ENV, gl::TEXTURE_ENV_MODE, gl::REPLACE);
CV_CheckGlError();
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR);
CV_CheckGlError();
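// Corners of the textured quad in window coordinates (x, y, z) and the
// matching texture coordinates for each corner.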
const float vertex[] =
{
wndRect.x, wndRect.y, 0.0f,
wndRect.x, (wndRect.y + wndRect.height), 0.0f,
wndRect.x + wndRect.width, (wndRect.y + wndRect.height), 0.0f,
wndRect.x + wndRect.width, wndRect.y, 0.0f
};
const float texCoords[] =
{
texRect.x, texRect.y,
texRect.x, texRect.y + texRect.height,
texRect.x + texRect.width, texRect.y + texRect.height,
texRect.x + texRect.width, texRect.y
};
GlBuffer::unbind(GlBuffer::ARRAY_BUFFER);
gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
CV_CheckGlError();
gl::TexCoordPointer(2, gl::FLOAT, 0, texCoords);
CV_CheckGlError();
gl::DisableClientState(gl::NORMAL_ARRAY);
gl::DisableClientState(gl::COLOR_ARRAY);
CV_CheckGlError();
gl::EnableClientState(gl::VERTEX_ARRAY);
CV_CheckGlError();
gl::VertexPointer(3, gl::FLOAT, 0, vertex);
CV_CheckGlError();
gl::DrawArrays(cv::RenderMode::QUADS, 0, 4);
CV_CheckGlError();
}
#endif
}
void cv::render(const GlArrays& arr, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
(void) arr;
(void) mode;
(void) color;
throw_nogl();
#else
if (!arr.empty())
{
gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);
arr.bind();
gl::DrawArrays(mode, 0, arr.size());
}
#endif
}
void cv::render(const GlArrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
(void) arr;
(void) indices;
(void) mode;
(void) color;
throw_nogl();
#else
if (!arr.empty() && !indices.empty())
{
gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);
arr.bind();
const int kind = indices.kind();
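// Indices can live in a GL buffer (bound as ELEMENT_ARRAY_BUFFER) or in host
// memory; the GL index type is chosen from the matrix depth (8-, 16- or 32-bit
// unsigned).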
switch (kind)
{
case _InputArray::OPENGL_BUFFER :
{
GlBuffer buf = indices.getGlBuffer();
const int depth = buf.depth();
CV_Assert( buf.channels() == 1 );
CV_Assert( depth <= CV_32S );
GLenum type;
if (depth < CV_16U)
type = gl::UNSIGNED_BYTE;
else if (depth < CV_32S)
type = gl::UNSIGNED_SHORT;
else
type = gl::UNSIGNED_INT;
buf.bind(GlBuffer::ELEMENT_ARRAY_BUFFER);
gl::DrawElements(mode, buf.size().area(), type, 0);
GlBuffer::unbind(GlBuffer::ELEMENT_ARRAY_BUFFER);
break;
}
default:
{
Mat mat = indices.getMat();
const int depth = mat.depth();
CV_Assert( mat.channels() == 1 );
CV_Assert( depth <= CV_32S );
CV_Assert( mat.isContinuous() );
GLenum type;
if (depth < CV_16U)
type = gl::UNSIGNED_BYTE;
else if (depth < CV_32S)
type = gl::UNSIGNED_SHORT;
else
type = gl::UNSIGNED_INT;
GlBuffer::unbind(GlBuffer::ELEMENT_ARRAY_BUFFER);
gl::DrawElements(mode, mat.size().area(), type, mat.data);
}
}
}
#endif
}