mirror of
https://github.com/opencv/opencv.git
synced 2025-07-25 22:57:53 +08:00

* Cuda + OpenGL on ARM There might be multiple ways of getting OpenCV compile on Tegra (NVIDIA Jetson) platform, but mainly they modify CUDA(8,9,10...) source code, this one fixes it for all installations. ( https://devtalk.nvidia.com/default/topic/1007290/jetson-tx2/building-opencv-with-opengl-support-/post/5141945/#5141945 et al.). This way is exactly the same as the one proposed but the code change happens in OpenCV. * Updated, The link provided mentions: cuda8 + 9, I have cuda 10 + 10.1 (and can confirm it is still defined this way). NVIDIA is probably using some other "secret" backend with Jetson.
1899 lines
50 KiB
C++
1899 lines
50 KiB
C++
/*M///////////////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
|
//
|
|
// By downloading, copying, installing or using the software you agree to this license.
|
|
// If you do not agree to this license, do not download, install,
|
|
// copy or use the software.
|
|
//
|
|
//
|
|
// License Agreement
|
|
// For Open Source Computer Vision Library
|
|
//
|
|
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
|
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
|
// Third party copyrights are property of their respective owners.
|
|
//
|
|
// Redistribution and use in source and binary forms, with or without modification,
|
|
// are permitted provided that the following conditions are met:
|
|
//
|
|
// * Redistribution's of source code must retain the above copyright notice,
|
|
// this list of conditions and the following disclaimer.
|
|
//
|
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
|
// this list of conditions and the following disclaimer in the documentation
|
|
// and/or other materials provided with the distribution.
|
|
//
|
|
// * The name of the copyright holders may not be used to endorse or promote products
|
|
// derived from this software without specific prior written permission.
|
|
//
|
|
// This software is provided by the copyright holders and contributors "as is" and
|
|
// any express or implied warranties, including, but not limited to, the implied
|
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
|
// indirect, incidental, special, exemplary, or consequential damages
|
|
// (including, but not limited to, procurement of substitute goods or services;
|
|
// loss of use, data, or profits; or business interruption) however caused
|
|
// and on any theory of liability, whether in contract, strict liability,
|
|
// or tort (including negligence or otherwise) arising in any way out of
|
|
// the use of this software, even if advised of the possibility of such damage.
|
|
//
|
|
//M*/
|
|
|
|
#include "precomp.hpp"
|
|
|
|
#ifdef HAVE_OPENGL
|
|
# include "gl_core_3_1.hpp"
|
|
# ifdef HAVE_CUDA
|
|
# if (defined(__arm__) || defined(__aarch64__)) \
|
|
&& !defined(OPENCV_SKIP_CUDA_OPENGL_ARM_WORKAROUND)
|
|
# include <GL/gl.h>
|
|
# ifndef GL_VERSION
|
|
# define GL_VERSION 0x1F02
|
|
# endif
|
|
# endif
|
|
# include <cuda_gl_interop.h>
|
|
# endif
|
|
#else // HAVE_OPENGL
|
|
# define NO_OPENGL_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without OpenGL support")
|
|
#endif // HAVE_OPENGL
|
|
|
|
using namespace cv;
|
|
using namespace cv::cuda;
|
|
|
|
#if defined(_MSC_VER)
|
|
#pragma warning(disable : 4702) // unreachable code
|
|
#endif
|
|
|
|
namespace
|
|
{
|
|
#ifndef HAVE_OPENGL
|
|
inline static CV_NORETURN void throw_no_ogl() { CV_Error(cv::Error::OpenGlNotSupported, "The library is compiled without OpenGL support"); }
|
|
#elif defined _DEBUG
|
|
inline static bool checkError(const char* file, const int line, const char* func = 0)
|
|
{
|
|
GLenum err = gl::GetError();
|
|
if (err != gl::NO_ERROR_)
|
|
{
|
|
const char* msg;
|
|
switch (err)
|
|
{
|
|
case gl::INVALID_ENUM:
|
|
msg = "An unacceptable value is specified for an enumerated argument";
|
|
break;
|
|
case gl::INVALID_VALUE:
|
|
msg = "A numeric argument is out of range";
|
|
break;
|
|
case gl::INVALID_OPERATION:
|
|
msg = "The specified operation is not allowed in the current state";
|
|
break;
|
|
case gl::OUT_OF_MEMORY:
|
|
msg = "There is not enough memory left to execute the command";
|
|
break;
|
|
default:
|
|
msg = "Unknown error";
|
|
};
|
|
cv::errorNoReturn(Error::OpenGlApiCallError, func, msg, file, line);
|
|
}
|
|
return true;
|
|
}
|
|
#endif // HAVE_OPENGL
|
|
} // namespace
|
|
|
|
#define CV_CheckGlError() CV_DbgAssert( (checkError(__FILE__, __LINE__, CV_Func)) )
|
|
|
|
#ifdef HAVE_OPENGL
|
|
namespace
|
|
{
|
|
const GLenum gl_types[] = { gl::UNSIGNED_BYTE, gl::BYTE, gl::UNSIGNED_SHORT, gl::SHORT, gl::INT, gl::FLOAT, gl::DOUBLE };
|
|
}
|
|
#endif
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
// setGlDevice
|
|
|
|
// Selects the CUDA device used for OpenGL interoperability by wrapping
// cudaGLSetGLDevice. Throws if the library was built without OpenGL or
// without CUDA support.
void cv::cuda::setGlDevice(int device)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(device);
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    CV_UNUSED(device);
    throw_no_cuda();
#else
    cudaSafeCall( cudaGLSetGLDevice(device) );
#endif
#endif
}
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
// CudaResource
|
|
|
|
#if defined(HAVE_OPENGL) && defined(HAVE_CUDA)
|
|
|
|
namespace
|
|
{
|
|
class CudaResource
|
|
{
|
|
public:
|
|
CudaResource();
|
|
~CudaResource();
|
|
|
|
void registerBuffer(GLuint buffer);
|
|
void release();
|
|
|
|
void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
|
|
void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0);
|
|
|
|
void* map(cudaStream_t stream = 0);
|
|
void unmap(cudaStream_t stream = 0);
|
|
|
|
private:
|
|
cudaGraphicsResource_t resource_;
|
|
GLuint buffer_;
|
|
|
|
class GraphicsMapHolder;
|
|
};
|
|
|
|
CudaResource::CudaResource() : resource_(0), buffer_(0)
|
|
{
|
|
}
|
|
|
|
CudaResource::~CudaResource()
|
|
{
|
|
release();
|
|
}
|
|
|
|
void CudaResource::registerBuffer(GLuint buffer)
|
|
{
|
|
CV_DbgAssert( buffer != 0 );
|
|
|
|
if (buffer_ == buffer)
|
|
return;
|
|
|
|
cudaGraphicsResource_t resource;
|
|
cudaSafeCall( cudaGraphicsGLRegisterBuffer(&resource, buffer, cudaGraphicsMapFlagsNone) );
|
|
|
|
release();
|
|
|
|
resource_ = resource;
|
|
buffer_ = buffer;
|
|
}
|
|
|
|
void CudaResource::release()
|
|
{
|
|
if (resource_)
|
|
cudaGraphicsUnregisterResource(resource_);
|
|
|
|
resource_ = 0;
|
|
buffer_ = 0;
|
|
}
|
|
|
|
class CudaResource::GraphicsMapHolder
|
|
{
|
|
public:
|
|
GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream);
|
|
~GraphicsMapHolder();
|
|
|
|
void reset();
|
|
|
|
private:
|
|
cudaGraphicsResource_t* resource_;
|
|
cudaStream_t stream_;
|
|
};
|
|
|
|
CudaResource::GraphicsMapHolder::GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream) : resource_(resource), stream_(stream)
|
|
{
|
|
if (resource_)
|
|
cudaSafeCall( cudaGraphicsMapResources(1, resource_, stream_) );
|
|
}
|
|
|
|
CudaResource::GraphicsMapHolder::~GraphicsMapHolder()
|
|
{
|
|
if (resource_)
|
|
cudaGraphicsUnmapResources(1, resource_, stream_);
|
|
}
|
|
|
|
void CudaResource::GraphicsMapHolder::reset()
|
|
{
|
|
resource_ = 0;
|
|
}
|
|
|
|
void CudaResource::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
|
|
{
|
|
CV_DbgAssert( resource_ != 0 );
|
|
|
|
GraphicsMapHolder h(&resource_, stream);
|
|
CV_UNUSED(h);
|
|
|
|
void* dst;
|
|
size_t size;
|
|
cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&dst, &size, resource_) );
|
|
|
|
CV_DbgAssert( width * height == size );
|
|
|
|
if (stream == 0)
|
|
cudaSafeCall( cudaMemcpy2D(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice) );
|
|
else
|
|
cudaSafeCall( cudaMemcpy2DAsync(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice, stream) );
|
|
}
|
|
|
|
void CudaResource::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream)
|
|
{
|
|
CV_DbgAssert( resource_ != 0 );
|
|
|
|
GraphicsMapHolder h(&resource_, stream);
|
|
CV_UNUSED(h);
|
|
|
|
void* src;
|
|
size_t size;
|
|
cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&src, &size, resource_) );
|
|
|
|
CV_DbgAssert( width * height == size );
|
|
|
|
if (stream == 0)
|
|
cudaSafeCall( cudaMemcpy2D(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice) );
|
|
else
|
|
cudaSafeCall( cudaMemcpy2DAsync(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice, stream) );
|
|
}
|
|
|
|
void* CudaResource::map(cudaStream_t stream)
|
|
{
|
|
CV_DbgAssert( resource_ != 0 );
|
|
|
|
GraphicsMapHolder h(&resource_, stream);
|
|
|
|
void* ptr;
|
|
size_t size;
|
|
cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&ptr, &size, resource_) );
|
|
|
|
h.reset();
|
|
|
|
return ptr;
|
|
}
|
|
|
|
void CudaResource::unmap(cudaStream_t stream)
|
|
{
|
|
CV_Assert( resource_ != 0 );
|
|
|
|
cudaGraphicsUnmapResources(1, &resource_, stream);
|
|
}
|
|
}
|
|
|
|
#endif
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
// ogl::Buffer
|
|
|
|
#ifndef HAVE_OPENGL

// Stub implementation used when OpenGL support is compiled out; the public
// Buffer methods throw before ever touching it.
class cv::ogl::Buffer::Impl
{
};

#else

// Shared, reference-counted implementation behind cv::ogl::Buffer.
// Owns (optionally) an OpenGL buffer object and, when CUDA is available,
// its CUDA graphics-interop registration.
class cv::ogl::Buffer::Impl
{
public:
    // Shared "null" impl (bufId 0) used by the default constructor/release().
    static const Ptr<Impl>& empty();

    // Wraps an existing GL buffer id (must be a valid buffer object).
    Impl(GLuint bufId, bool autoRelease);
    // Creates a new GL buffer of `size` bytes, optionally filled from `data`.
    Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease);
    ~Impl();

    void bind(GLenum target) const;

    // GL-side buffer-to-buffer copy of `size` bytes.
    void copyFrom(GLuint srcBuf, GLsizeiptr size);

    // Host-memory upload/download of `size` bytes.
    void copyFrom(GLsizeiptr size, const GLvoid* data);
    void copyTo(GLsizeiptr size, GLvoid* data) const;

    // Maps the buffer into host address space; pair with unmapHost().
    void* mapHost(GLenum access);
    void unmapHost();

#ifdef HAVE_CUDA
    // Device-memory transfers through the CUDA graphics-interop resource.
    void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
    void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0) const;

    // Maps the buffer for CUDA access; pair with unmapDevice().
    void* mapDevice(cudaStream_t stream = 0);
    void unmapDevice(cudaStream_t stream = 0);
#endif

    void setAutoRelease(bool flag) { autoRelease_ = flag; }

    GLuint bufId() const { return bufId_; }

private:
    Impl();

    GLuint bufId_;      // 0 for the shared empty impl
    bool autoRelease_;  // delete the GL buffer in the destructor when true

#ifdef HAVE_CUDA
    // mutable: registering/mapping is needed from const methods (copyTo).
    mutable CudaResource cudaResource_;
#endif
};

const Ptr<cv::ogl::Buffer::Impl>& cv::ogl::Buffer::Impl::empty()
{
    static Ptr<Impl> p(new Impl);
    return p;
}

cv::ogl::Buffer::Impl::Impl() : bufId_(0), autoRelease_(false)
{
}

cv::ogl::Buffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease)
{
    CV_Assert( gl::IsBuffer(abufId) == gl::TRUE_ );
}

cv::ogl::Buffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease)
{
    gl::GenBuffers(1, &bufId_);
    CV_CheckGlError();

    CV_Assert( bufId_ != 0 );

    gl::BindBuffer(target, bufId_);
    CV_CheckGlError();

    gl::BufferData(target, size, data, gl::DYNAMIC_DRAW);
    CV_CheckGlError();

    // Leave the target unbound so we do not disturb the caller's GL state.
    gl::BindBuffer(target, 0);
    CV_CheckGlError();
}

cv::ogl::Buffer::Impl::~Impl()
{
    if (autoRelease_ && bufId_)
        gl::DeleteBuffers(1, &bufId_);
}

void cv::ogl::Buffer::Impl::bind(GLenum target) const
{
    gl::BindBuffer(target, bufId_);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
{
    gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
    CV_CheckGlError();

    gl::BindBuffer(gl::COPY_READ_BUFFER, srcBuf);
    CV_CheckGlError();

    gl::CopyBufferSubData(gl::COPY_READ_BUFFER, gl::COPY_WRITE_BUFFER, 0, 0, size);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
{
    gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
    CV_CheckGlError();

    gl::BufferSubData(gl::COPY_WRITE_BUFFER, 0, size, data);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
    CV_CheckGlError();

    gl::GetBufferSubData(gl::COPY_READ_BUFFER, 0, size, data);
    CV_CheckGlError();
}

void* cv::ogl::Buffer::Impl::mapHost(GLenum access)
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
    CV_CheckGlError();

    GLvoid* data = gl::MapBuffer(gl::COPY_READ_BUFFER, access);
    CV_CheckGlError();

    return data;
}

void cv::ogl::Buffer::Impl::unmapHost()
{
    gl::UnmapBuffer(gl::COPY_READ_BUFFER);
}

#ifdef HAVE_CUDA

void cv::ogl::Buffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
{
    // registerBuffer is a no-op when the buffer is already registered.
    cudaResource_.registerBuffer(bufId_);
    cudaResource_.copyFrom(src, spitch, width, height, stream);
}

void cv::ogl::Buffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const
{
    cudaResource_.registerBuffer(bufId_);
    cudaResource_.copyTo(dst, dpitch, width, height, stream);
}

void* cv::ogl::Buffer::Impl::mapDevice(cudaStream_t stream)
{
    cudaResource_.registerBuffer(bufId_);
    return cudaResource_.map(stream);
}

void cv::ogl::Buffer::Impl::unmapDevice(cudaStream_t stream)
{
    cudaResource_.unmap(stream);
}

#endif // HAVE_CUDA

#endif // HAVE_OPENGL
|
|
|
|
// Default constructor: shares the global empty Impl (no GL buffer allocated).
cv::ogl::Buffer::Buffer() : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_ = Impl::empty();
#endif
}

// Wraps an existing GL buffer id with the given logical rows/cols/type.
// The id must refer to a valid buffer object; ownership follows autoRelease.
cv::ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arows);
    CV_UNUSED(acols);
    CV_UNUSED(atype);
    CV_UNUSED(abufId);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    impl_.reset(new Impl(abufId, autoRelease));
    rows_ = arows;
    cols_ = acols;
    type_ = atype;
#endif
}

// Same as above, with the dimensions given as a Size.
cv::ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(asize);
    CV_UNUSED(atype);
    CV_UNUSED(abufId);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    impl_.reset(new Impl(abufId, autoRelease));
    rows_ = asize.height;
    cols_ = asize.width;
    type_ = atype;
#endif
}
|
|
|
|
// Constructs a buffer from any array source. GL-buffer and GpuMat inputs are
// routed through copyFrom; host Mat data (must be continuous) is uploaded
// directly into a freshly created GL buffer.
cv::ogl::Buffer::Buffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(target);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
    case _InputArray::CUDA_GPU_MAT:
        copyFrom(arr, target, autoRelease);
        break;

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            // Widen before multiplying: rows * cols is otherwise evaluated in
            // int and can overflow for matrices with more than INT_MAX elements.
            const GLsizeiptr asize = static_cast<GLsizeiptr>(mat.rows) * mat.cols * mat.elemSize();
            impl_.reset(new Impl(asize, mat.data, target, autoRelease));
            rows_ = mat.rows;
            cols_ = mat.cols;
            type_ = mat.type();
            break;
        }
    }
#endif
}
|
|
|
|
void cv::ogl::Buffer::create(int arows, int acols, int atype, Target target, bool autoRelease)
|
|
{
|
|
#ifndef HAVE_OPENGL
|
|
CV_UNUSED(arows);
|
|
CV_UNUSED(acols);
|
|
CV_UNUSED(atype);
|
|
CV_UNUSED(target);
|
|
CV_UNUSED(autoRelease);
|
|
throw_no_ogl();
|
|
#else
|
|
if (rows_ != arows || cols_ != acols || type_ != atype)
|
|
{
|
|
const GLsizeiptr asize = arows * acols * CV_ELEM_SIZE(atype);
|
|
impl_.reset(new Impl(asize, 0, target, autoRelease));
|
|
rows_ = arows;
|
|
cols_ = acols;
|
|
type_ = atype;
|
|
}
|
|
#endif
|
|
}
|
|
|
|
// Detaches from the current impl (swapping in the shared empty one) and
// resets the logical shape. The GL buffer itself is deleted when the last
// reference goes away; autoRelease is forced on so release() always frees.
void cv::ogl::Buffer::release()
{
#ifdef HAVE_OPENGL
    if (impl_)
        impl_->setAutoRelease(true);
    impl_ = Impl::empty();
    rows_ = 0;
    cols_ = 0;
    type_ = 0;
#endif
}

// Controls whether the underlying GL buffer is deleted when the impl dies.
void cv::ogl::Buffer::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(flag);
    throw_no_ogl();
#else
    impl_->setAutoRelease(flag);
#endif
}
|
|
|
|
// Copies data from any array source into this buffer, reallocating storage
// as needed. GL buffers copy GPU-side, GpuMats go through CUDA interop, and
// host Mats (must be continuous) are uploaded.
void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(target);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();
    create(asize, atype, target, autoRelease);

    // Widen before multiplying: Size::area() and CV_ELEM_SIZE work in int,
    // which can overflow for very large arrays.
    const GLsizeiptr abufSize = static_cast<GLsizeiptr>(asize.height) * asize.width * CV_ELEM_SIZE(atype);

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            impl_->copyFrom(buf.bufId(), abufSize);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            GpuMat dmat = arr.getGpuMat();
            impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
#endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyFrom(abufSize, mat.data);
        }
    }
#endif
}
|
|
|
|
// Asynchronous GpuMat -> buffer copy on the given CUDA stream. Only GpuMat
// sources are supported on this overload (arr is fetched via getGpuMat()).
void cv::ogl::Buffer::copyFrom(InputArray arr, cuda::Stream& stream, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(stream);
    CV_UNUSED(target);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    CV_UNUSED(arr);
    CV_UNUSED(stream);
    CV_UNUSED(target);
    CV_UNUSED(autoRelease);
    throw_no_cuda();
#else
    GpuMat dmat = arr.getGpuMat();

    create(dmat.size(), dmat.type(), target, autoRelease);

    impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
#endif
#endif
}
|
|
|
|
// Copies the buffer contents into the destination array, allocating it to
// this buffer's rows/cols/type. Supports GL-buffer, GpuMat and host-Mat
// destinations (Mat must be continuous).
void cv::ogl::Buffer::copyTo(OutputArray arr) const
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            arr.getOGlBufferRef().copyFrom(*this);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            GpuMat& dmat = arr.getGpuMatRef();
            dmat.create(rows_, cols_, type_);
            impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
#endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, type_);
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            // Widen before multiplying: rows * cols is otherwise evaluated in
            // int and can overflow for very large matrices.
            impl_->copyTo(static_cast<GLsizeiptr>(mat.rows) * mat.cols * mat.elemSize(), mat.data);
        }
    }
#endif
}
|
|
|
|
// Asynchronous buffer -> GpuMat copy on the given CUDA stream.
void cv::ogl::Buffer::copyTo(OutputArray arr, cuda::Stream& stream) const
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(stream);
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    CV_UNUSED(arr);
    CV_UNUSED(stream);
    throw_no_cuda();
#else
    arr.create(rows_, cols_, type_);
    GpuMat dmat = arr.getGpuMat();
    impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
#endif
#endif
}
|
|
|
|
// Returns a deep copy of this buffer (new GL buffer object, same contents).
cv::ogl::Buffer cv::ogl::Buffer::clone(Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    CV_UNUSED(target);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    ogl::Buffer buf;
    buf.copyFrom(*this, target, autoRelease);
    return buf;
#endif
}

// Binds the underlying GL buffer to the given target.
void cv::ogl::Buffer::bind(Target target) const
{
#ifndef HAVE_OPENGL
    CV_UNUSED(target);
    throw_no_ogl();
#else
    impl_->bind(target);
#endif
}

// Unbinds any buffer from the given target (binds buffer id 0).
void cv::ogl::Buffer::unbind(Target target)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(target);
    throw_no_ogl();
#else
    gl::BindBuffer(target, 0);
    CV_CheckGlError();
#endif
}
|
|
|
|
Mat cv::ogl::Buffer::mapHost(Access access)
|
|
{
|
|
#ifndef HAVE_OPENGL
|
|
CV_UNUSED(access);
|
|
throw_no_ogl();
|
|
#else
|
|
return Mat(rows_, cols_, type_, impl_->mapHost(access));
|
|
#endif
|
|
}
|
|
|
|
void cv::ogl::Buffer::unmapHost()
|
|
{
|
|
#ifndef HAVE_OPENGL
|
|
throw_no_ogl();
|
|
#else
|
|
return impl_->unmapHost();
|
|
#endif
|
|
}
|
|
|
|
// Maps the buffer for CUDA access and wraps the device pointer in a GpuMat
// header. Must be balanced with unmapDevice().
GpuMat cv::ogl::Buffer::mapDevice()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    throw_no_cuda();
#else
    return GpuMat(rows_, cols_, type_, impl_->mapDevice());
#endif
#endif
}

// Releases a mapping previously obtained via mapDevice().
void cv::ogl::Buffer::unmapDevice()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    throw_no_cuda();
#else
    impl_->unmapDevice();
#endif
#endif
}

// Stream-ordered variant of mapDevice(); map/unmap are enqueued on `stream`.
cuda::GpuMat cv::ogl::Buffer::mapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(stream);
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    CV_UNUSED(stream);
    throw_no_cuda();
#else
    return GpuMat(rows_, cols_, type_, impl_->mapDevice(cuda::StreamAccessor::getStream(stream)));
#endif
#endif
}

// Stream-ordered variant of unmapDevice().
void cv::ogl::Buffer::unmapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(stream);
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    CV_UNUSED(stream);
    throw_no_cuda();
#else
    impl_->unmapDevice(cuda::StreamAccessor::getStream(stream));
#endif
#endif
}

// Returns the underlying OpenGL buffer object id.
unsigned int cv::ogl::Buffer::bufId() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    return impl_->bufId();
#endif
}
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////////////////
|
|
// ogl::Texture
|
|
|
|
#ifndef HAVE_OPENGL

// Stub implementation used when OpenGL support is compiled out.
class cv::ogl::Texture2D::Impl
{
};

#else

// Shared implementation behind cv::ogl::Texture2D: owns (optionally) a GL
// 2D texture object and provides upload/download of its image data.
class cv::ogl::Texture2D::Impl
{
public:
    // Shared "null" impl (texId 0) used by the default constructor/release().
    static const Ptr<Impl> empty();

    // Wraps an existing GL texture id (must be a valid texture object).
    Impl(GLuint texId, bool autoRelease);
    // Creates a new 2D texture with the given format/size, optionally filled
    // from `pixels` (or from the bound PIXEL_UNPACK buffer when pixels == 0).
    Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease);
    ~Impl();

    // Replaces the texture image; `pixels` may be 0 to source from a PBO.
    void copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels);
    // Reads the texture image back; `pixels` may be 0 to target a PBO.
    void copyTo(GLenum format, GLenum type, GLvoid* pixels) const;

    void bind() const;

    void setAutoRelease(bool flag) { autoRelease_ = flag; }

    GLuint texId() const { return texId_; }

private:
    Impl();

    GLuint texId_;      // 0 for the shared empty impl
    bool autoRelease_;  // delete the GL texture in the destructor when true
};

const Ptr<cv::ogl::Texture2D::Impl> cv::ogl::Texture2D::Impl::empty()
{
    static Ptr<Impl> p(new Impl);
    return p;
}

cv::ogl::Texture2D::Impl::Impl() : texId_(0), autoRelease_(false)
{
}

cv::ogl::Texture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease)
{
    CV_Assert( gl::IsTexture(atexId) == gl::TRUE_ );
}

cv::ogl::Texture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease)
{
    gl::GenTextures(1, &texId_);
    CV_CheckGlError();

    CV_Assert(texId_ != 0);

    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    // Byte-tight rows: OpenCV Mats are not 4-byte row-aligned in general.
    gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::TexImage2D(gl::TEXTURE_2D, 0, internalFormat, width, height, 0, format, type, pixels);
    CV_CheckGlError();

    gl::GenerateMipmap(gl::TEXTURE_2D);
    CV_CheckGlError();
}

cv::ogl::Texture2D::Impl::~Impl()
{
    if (autoRelease_ && texId_)
        gl::DeleteTextures(1, &texId_);
}

void cv::ogl::Texture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels)
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::TexSubImage2D(gl::TEXTURE_2D, 0, 0, 0, width, height, format, type, pixels);
    CV_CheckGlError();

    // Keep mipmap levels consistent with the updated base level.
    gl::GenerateMipmap(gl::TEXTURE_2D);
    CV_CheckGlError();
}

void cv::ogl::Texture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::GetTexImage(gl::TEXTURE_2D, 0, format, type, pixels);
    CV_CheckGlError();
}

void cv::ogl::Texture2D::Impl::bind() const
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();
}

#endif // HAVE_OPENGL
|
|
|
|
// Default constructor: shares the global empty Impl (no GL texture allocated).
cv::ogl::Texture2D::Texture2D() : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_ = Impl::empty();
#endif
}

// Wraps an existing GL texture id with the given logical rows/cols/format.
cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arows);
    CV_UNUSED(acols);
    CV_UNUSED(aformat);
    CV_UNUSED(atexId);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    impl_.reset(new Impl(atexId, autoRelease));
    rows_ = arows;
    cols_ = acols;
    format_ = aformat;
#endif
}

// Same as above, with the dimensions given as a Size.
cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(asize);
    CV_UNUSED(aformat);
    CV_UNUSED(atexId);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    impl_.reset(new Impl(atexId, autoRelease));
    rows_ = asize.height;
    cols_ = asize.width;
    format_ = aformat;
#endif
}
|
|
|
|
// Constructs a texture from any array source. GL-buffer and GpuMat inputs
// are uploaded through a pixel-unpack buffer; host Mats (must be continuous)
// are uploaded directly. Supported element depths: up to CV_32F; channels:
// 1 (depth component), 3 (RGB) or 4 (RGBA).
cv::ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    // Indexed by channel count; 2-channel input is unsupported (NONE).
    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    // Source pixel layouts for each channel count (OpenCV stores BGR/BGRA).
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            // Source pixels from the GL buffer via PIXEL_UNPACK binding
            // (pixels pointer 0 means "read from the bound PBO").
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            // Stage the GpuMat through a temporary PBO, then upload from it.
            GpuMat dmat = arr.getGpuMat();
            ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
            buf.setAutoRelease(true);
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            // Ensure no PBO is bound so `mat.data` is read as a host pointer.
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease));
            break;
        }
    }

    rows_ = asize.height;
    cols_ = asize.width;
    format_ = internalFormats[cn];
#endif
}
|
|
|
|
// Allocates (or reuses) texture storage for arows x acols texels of aformat.
// No-op when the current shape and format already match; otherwise a new GL
// texture is created with uninitialized contents.
void cv::ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arows);
    CV_UNUSED(acols);
    CV_UNUSED(aformat);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    if (rows_ != arows || cols_ != acols || format_ != aformat)
    {
        // Ensure no PBO is bound: pixels == 0 must mean "no initial data",
        // not "read from the bound unpack buffer".
        ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
        impl_.reset(new Impl(aformat, acols, arows, aformat, gl::FLOAT, 0, autoRelease));
        rows_ = arows;
        cols_ = acols;
        format_ = aformat;
    }
#endif
}

// Detaches from the current impl (swapping in the shared empty one) and
// resets the logical shape; autoRelease is forced on so release() frees.
void cv::ogl::Texture2D::release()
{
#ifdef HAVE_OPENGL
    if (impl_)
        impl_->setAutoRelease(true);
    impl_ = Impl::empty();
    rows_ = 0;
    cols_ = 0;
    format_ = NONE;
#endif
}

// Controls whether the underlying GL texture is deleted when the impl dies.
void cv::ogl::Texture2D::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(flag);
    throw_no_ogl();
#else
    impl_->setAutoRelease(flag);
#endif
}
|
|
|
|
// Copies data from any array source into this texture, reallocating storage
// as needed. GL buffers and GpuMats are uploaded through a pixel-unpack
// buffer; host Mats (must be continuous) are uploaded directly.
void cv::ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    // Indexed by channel count; 2-channel input is unsupported (NONE).
    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    // Source pixel layouts for each channel count (OpenCV stores BGR/BGRA).
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    create(asize, internalFormats[cn], autoRelease);

    switch(kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            // pixels pointer 0 means "read from the bound unpack buffer".
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            // Stage the GpuMat through a temporary PBO, then upload from it.
            GpuMat dmat = arr.getGpuMat();
            ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
            buf.setAutoRelease(true);
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            // Ensure no PBO is bound so `mat.data` is read as a host pointer.
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data);
        }
    }
#endif
}
|
|
|
|
// Reads the texture image back into the destination array at the requested
// element depth. GL-buffer and GpuMat destinations go through a pixel-pack
// buffer; host Mats (must be continuous) receive the data directly.
void cv::ogl::Texture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(ddepth);
    CV_UNUSED(autoRelease);
    throw_no_ogl();
#else
    const int kind = arr.kind();

    // Derive channel count and GL readback layout from the stored format.
    const int cn = format_ == DEPTH_COMPONENT ? 1: format_ == RGB ? 3 : 4;
    const GLenum dstFormat = format_ == DEPTH_COMPONENT ? gl::DEPTH_COMPONENT : format_ == RGB ? gl::BGR : gl::BGRA;

    switch(kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            // pixels pointer 0 means "write into the bound pack buffer".
            ogl::Buffer& buf = arr.getOGlBufferRef();
            buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER, autoRelease);
            buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            // Stage through a temporary PBO, then copy it into the GpuMat.
            ogl::Buffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER);
            buf.setAutoRelease(true);
            buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            buf.copyTo(arr);
#endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn));
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            // Ensure no PBO is bound so `mat.data` is written as a host pointer.
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], mat.data);
        }
    }
#endif
}
|
|
|
|
void cv::ogl::Texture2D::bind() const
|
|
{
|
|
#ifndef HAVE_OPENGL
|
|
throw_no_ogl();
|
|
#else
|
|
impl_->bind();
|
|
#endif
|
|
}
|
|
|
|
unsigned int cv::ogl::Texture2D::texId() const
|
|
{
|
|
#ifndef HAVE_OPENGL
|
|
throw_no_ogl();
|
|
#else
|
|
return impl_->texId();
|
|
#endif
|
|
}
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
// ogl::Arrays
|
|
|
|
// Sets the vertex coordinate array (2, 3 or 4 channels; 16S/32S/32F/64F).
// The element count of this array defines size_ used by bind()/render().
void cv::ogl::Arrays::setVertexArray(InputArray vertex)
{
    const int cn = vertex.channels();
    const int depth = vertex.depth();

    CV_Assert( cn == 2 || cn == 3 || cn == 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (vertex.kind() != _InputArray::OPENGL_BUFFER)
        vertex_.copyFrom(vertex);           // upload host/device data into our buffer
    else
        vertex_ = vertex.getOGlBuffer();    // share the caller's GL buffer

    size_ = vertex_.size().area();          // total number of vertices
}
|
|
|
|
void cv::ogl::Arrays::resetVertexArray()
|
|
{
|
|
vertex_.release();
|
|
size_ = 0;
|
|
}
|
|
|
|
// Sets the per-vertex color array (3 or 4 channels).
void cv::ogl::Arrays::setColorArray(InputArray color)
{
    const int cn = color.channels();

    CV_Assert( cn == 3 || cn == 4 );

    if (color.kind() != _InputArray::OPENGL_BUFFER)
        color_.copyFrom(color);             // upload into our own buffer
    else
        color_ = color.getOGlBuffer();      // share the caller's GL buffer
}
|
|
|
|
void cv::ogl::Arrays::resetColorArray()
|
|
{
|
|
color_.release();
|
|
}
|
|
|
|
// Sets the per-vertex normal array (exactly 3 channels; signed/float depths).
void cv::ogl::Arrays::setNormalArray(InputArray normal)
{
    const int cn = normal.channels();
    const int depth = normal.depth();

    CV_Assert( cn == 3 );
    CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (normal.kind() != _InputArray::OPENGL_BUFFER)
        normal_.copyFrom(normal);           // upload into our own buffer
    else
        normal_ = normal.getOGlBuffer();    // share the caller's GL buffer
}
|
|
|
|
void cv::ogl::Arrays::resetNormalArray()
|
|
{
|
|
normal_.release();
|
|
}
|
|
|
|
// Sets the per-vertex texture coordinate array (1-4 channels; 16S/32S/32F/64F).
void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord)
{
    const int cn = texCoord.channels();
    const int depth = texCoord.depth();

    CV_Assert( cn >= 1 && cn <= 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (texCoord.kind() != _InputArray::OPENGL_BUFFER)
        texCoord_.copyFrom(texCoord);           // upload into our own buffer
    else
        texCoord_ = texCoord.getOGlBuffer();    // share the caller's GL buffer
}
|
|
|
|
void cv::ogl::Arrays::resetTexCoordArray()
|
|
{
|
|
texCoord_.release();
|
|
}
|
|
|
|
void cv::ogl::Arrays::release()
|
|
{
|
|
resetVertexArray();
|
|
resetColorArray();
|
|
resetNormalArray();
|
|
resetTexCoordArray();
|
|
}
|
|
|
|
void cv::ogl::Arrays::setAutoRelease(bool flag)
|
|
{
|
|
#ifndef HAVE_OPENGL
|
|
CV_UNUSED(flag);
|
|
throw_no_ogl();
|
|
#else
|
|
vertex_.setAutoRelease(flag);
|
|
color_.setAutoRelease(flag);
|
|
normal_.setAutoRelease(flag);
|
|
texCoord_.setAutoRelease(flag);
|
|
#endif
|
|
}
|
|
|
|
// Binds all non-empty attribute arrays for fixed-function rendering and
// disables the client states for the empty ones. Every non-empty array
// must hold exactly size_ elements (established by setVertexArray()).
void cv::ogl::Arrays::bind() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    // Each supplied attribute array must match the vertex count.
    CV_Assert( texCoord_.empty() || texCoord_.size().area() == size_ );
    CV_Assert( normal_.empty() || normal_.size().area() == size_ );
    CV_Assert( color_.empty() || color_.size().area() == size_ );

    if (texCoord_.empty())
    {
        gl::DisableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();

        texCoord_.bind(ogl::Buffer::ARRAY_BUFFER);

        // Null data pointer: coordinates come from the bound buffer object.
        gl::TexCoordPointer(texCoord_.channels(), gl_types[texCoord_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (normal_.empty())
    {
        gl::DisableClientState(gl::NORMAL_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::NORMAL_ARRAY);
        CV_CheckGlError();

        normal_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::NormalPointer(gl_types[normal_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (color_.empty())
    {
        gl::DisableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();

        color_.bind(ogl::Buffer::ARRAY_BUFFER);

        const int cn = color_.channels();

        gl::ColorPointer(cn, gl_types[color_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (vertex_.empty())
    {
        gl::DisableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();

        vertex_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::VertexPointer(vertex_.channels(), gl_types[vertex_.depth()], 0, 0);
        CV_CheckGlError();
    }

    // The *Pointer calls above captured the buffer bindings; leave no array
    // buffer bound so subsequent client-side pointer setup is unaffected.
    ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);
#endif
}
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
// Rendering
|
|
|
|
// Draws 'tex' as a textured quad using the fixed-function pipeline.
// wndRect is the destination rectangle in normalized window coordinates
// (the projection maps [0,1]x[0,1] with the origin at top-left);
// texRect selects the texture sub-region in normalized texture coordinates.
// No-op if the texture is empty.
void cv::ogl::render(const ogl::Texture2D& tex, Rect_<double> wndRect, Rect_<double> texRect)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(tex);
    CV_UNUSED(wndRect);
    CV_UNUSED(texRect);
    throw_no_ogl();
#else
    if (!tex.empty())
    {
        // Unit orthographic projection, y flipped so (0,0) is top-left.
        gl::MatrixMode(gl::PROJECTION);
        gl::LoadIdentity();
        gl::Ortho(0.0, 1.0, 1.0, 0.0, -1.0, 1.0);
        CV_CheckGlError();

        gl::MatrixMode(gl::MODELVIEW);
        gl::LoadIdentity();
        CV_CheckGlError();

        gl::Disable(gl::LIGHTING);
        CV_CheckGlError();

        tex.bind();

        gl::Enable(gl::TEXTURE_2D);
        CV_CheckGlError();

        // REPLACE: output the texel color unmodified by the current color.
        gl::TexEnvi(gl::TEXTURE_ENV, gl::TEXTURE_ENV_MODE, gl::REPLACE);
        CV_CheckGlError();

        gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR);
        CV_CheckGlError();

        // Quad corners (x, y, z) in window space, counter-clockwise from
        // top-left given the flipped projection above.
        const double vertex[] =
        {
            wndRect.x, wndRect.y, 0.0,
            wndRect.x, (wndRect.y + wndRect.height), 0.0,
            wndRect.x + wndRect.width, (wndRect.y + wndRect.height), 0.0,
            wndRect.x + wndRect.width, wndRect.y, 0.0
        };
        // Matching (s, t) texture coordinates for each corner.
        const double texCoords[] =
        {
            texRect.x, texRect.y,
            texRect.x, texRect.y + texRect.height,
            texRect.x + texRect.width, texRect.y + texRect.height,
            texRect.x + texRect.width, texRect.y
        };

        // Use client-side arrays: make sure no buffer object is bound,
        // otherwise the pointers below would be treated as buffer offsets.
        ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);

        gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();

        gl::TexCoordPointer(2, gl::DOUBLE, 0, texCoords);
        CV_CheckGlError();

        gl::DisableClientState(gl::NORMAL_ARRAY);
        gl::DisableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();

        gl::EnableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();

        gl::VertexPointer(3, gl::DOUBLE, 0, vertex);
        CV_CheckGlError();

        gl::DrawArrays(gl::QUADS, 0, 4);
        CV_CheckGlError();
    }
#endif
}
|
|
|
|
// Renders the vertex arrays in 'arr' with the given primitive mode and a
// constant color (given in 0..255, scaled to GL's 0..1 range).
// No-op when the array set is empty.
void cv::ogl::render(const ogl::Arrays& arr, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(mode);
    CV_UNUSED(color);
    throw_no_ogl();
#else
    if (arr.empty())
        return;

    gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

    arr.bind();

    gl::DrawArrays(mode, 0, arr.size());
#endif
}
|
|
|
|
// Indexed rendering of the vertex arrays in 'arr'. 'indices' must be a
// single-channel integer array of depth <= CV_32S; it may live in a GL
// buffer (used as an element-array buffer) or in host memory.
// No-op if either the arrays or the index list is empty.
void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    CV_UNUSED(arr);
    CV_UNUSED(indices);
    CV_UNUSED(mode);
    CV_UNUSED(color);
    throw_no_ogl();
#else
    if (!arr.empty() && !indices.empty())
    {
        // Constant color, scaled from 0..255 to GL's 0..1.
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        const int kind = indices.kind();

        switch (kind)
        {
        case _InputArray::OPENGL_BUFFER :
            {
                ogl::Buffer buf = indices.getOGlBuffer();

                const int depth = buf.depth();

                CV_Assert( buf.channels() == 1 );
                CV_Assert( depth <= CV_32S );

                // Map OpenCV depth to the GL index type; note that signed
                // depths (CV_8S/CV_16S) are passed as the unsigned GL type
                // of the same width.
                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                // Indices are sourced from the bound element-array buffer
                // (null offset = start of buffer).
                buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, buf.size().area(), type, 0);

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                break;
            }

        default:
            {
                Mat mat = indices.getMat();

                const int depth = mat.depth();

                CV_Assert( mat.channels() == 1 );
                CV_Assert( depth <= CV_32S );
                CV_Assert( mat.isContinuous() );  // indices read as one packed run

                // Same depth -> GL type mapping as the buffer path above.
                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                // Unbind any element-array buffer so mat.data is taken as a
                // client-memory pointer, not a buffer offset.
                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, mat.size().area(), type, mat.data);
            }
        }
    }
#endif
}
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
// CL-GL Interoperability
|
|
|
|
#ifdef HAVE_OPENCL
|
|
# include "opencv2/core/opencl/runtime/opencl_gl.hpp"
|
|
# ifdef cl_khr_gl_sharing
|
|
# define HAVE_OPENCL_OPENGL_SHARING
|
|
# else
|
|
# define NO_OPENCL_SHARING_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without OpenCL/OpenGL sharing support")
|
|
# endif
|
|
#else // HAVE_OPENCL
|
|
# define NO_OPENCL_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without OpenCL support")
|
|
#endif // HAVE_OPENCL
|
|
|
|
#if defined(HAVE_OPENGL)
|
|
# if defined(__ANDROID__)
|
|
# include <EGL/egl.h>
|
|
# elif defined(__linux__)
|
|
# include <GL/glx.h>
|
|
# endif
|
|
#endif // HAVE_OPENGL
|
|
|
|
namespace cv { namespace ogl {
|
|
|
|
namespace ocl {
|
|
|
|
// Creates an OpenCL context that shares resources with the current OpenGL
// context and installs it as OpenCV's default OpenCL context. Scans all CL
// platforms, keeping the first one that advertises cl_khr_gl_sharing and can
// resolve the device driving the current GL context.
// Must be called from a thread with a current GL context.
// Raises OpenCLInitError if no suitable platform/device is found.
Context& initializeContextFromGL()
{
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
    NO_OPENCL_SHARING_ERROR;
#else
    cl_uint numPlatforms;
    cl_int status = clGetPlatformIDs(0, NULL, &numPlatforms);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get number of platforms");
    if (numPlatforms == 0)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: No available platforms");

    std::vector<cl_platform_id> platforms(numPlatforms);
    status = clGetPlatformIDs(numPlatforms, &platforms[0], NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get number of platforms");

    // TODO Filter platforms by name from OPENCV_OPENCL_DEVICE

    int found = -1;
    cl_device_id device = NULL;
    cl_context context = NULL;

    for (int i = 0; i < (int)numPlatforms; i++)
    {
        // query platform extension: presence of "cl_khr_gl_sharing" extension is required
        {
            AutoBuffer<char> extensionStr;

            size_t extensionSize;
            status = clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, 0, NULL, &extensionSize);
            if (status == CL_SUCCESS)
            {
                extensionStr.allocate(extensionSize+1);
                status = clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, extensionSize, (char*)extensionStr.data(), NULL);
            }
            if (status != CL_SUCCESS)
                CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get platform extension string");

            if (!strstr((const char*)extensionStr.data(), "cl_khr_gl_sharing"))
                continue;
        }

        // The extension entry point must be resolved per-platform.
        clGetGLContextInfoKHR_fn clGetGLContextInfoKHR = (clGetGLContextInfoKHR_fn)
            clGetExtensionFunctionAddressForPlatform(platforms[i], "clGetGLContextInfoKHR");
        if (!clGetGLContextInfoKHR)
            continue;

        // Properties describing the current GL context for this platform.
        // NOTE(review): on non-Android Linux this assumes a GLX context;
        // platforms that use EGL on desktop Linux (e.g. NVIDIA Jetson/Tegra)
        // are not covered by this branch — confirm against target platform.
        cl_context_properties properties[] =
        {
#if defined(_WIN32)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)wglGetCurrentContext(),
            CL_WGL_HDC_KHR, (cl_context_properties)wglGetCurrentDC(),
#elif defined(__ANDROID__)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)eglGetCurrentContext(),
            CL_EGL_DISPLAY_KHR, (cl_context_properties)eglGetCurrentDisplay(),
#elif defined(__linux__)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)glXGetCurrentContext(),
            CL_GLX_DISPLAY_KHR, (cl_context_properties)glXGetCurrentDisplay(),
#endif
            0
        };

        // query device
        device = NULL;
        status = clGetGLContextInfoKHR(properties, CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR, sizeof(cl_device_id), (void*)&device, NULL);
        if (status != CL_SUCCESS)
            continue;

        // create context
        context = clCreateContext(properties, 1, &device, NULL, NULL, &status);
        if (status != CL_SUCCESS)
        {
            clReleaseDevice(device);
        }
        else
        {
            found = i;
            break;
        }
    }

    if (found < 0)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't create context for OpenGL interop");

    // Install the shared context as OpenCV's default OpenCL context.
    Context& ctx = Context::getDefault(false);
    initializeContextFromHandle(ctx, platforms[found], context, device);
    return ctx;
#endif
}
|
|
|
|
} // namespace cv::ogl::ocl
|
|
|
|
// Uploads 'src' into an existing OpenGL texture via CL-GL sharing: the
// texture is wrapped as a cl_mem image and filled by a device-side
// buffer-to-image copy (no host round trip). The texture must already be
// allocated with the same width/height as 'src'; ROI/offset UMats are not
// supported yet.
void convertToGLTexture2D(InputArray src, Texture2D& texture)
{
    CV_UNUSED(src); CV_UNUSED(texture);
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
    NO_OPENCL_SHARING_ERROR;
#else
    Size srcSize = src.size();
    CV_Assert(srcSize.width == (int)texture.cols() && srcSize.height == (int)texture.rows());

    using namespace cv::ocl;
    Context& ctx = Context::getDefault();
    cl_context context = (cl_context)ctx.ptr();

    UMat u = src.getUMat();

    // TODO Add support for roi
    CV_Assert(u.offset == 0);
    CV_Assert(u.isContinuous());

    // Wrap the GL texture as a write-only CL image.
    cl_int status = 0;
    cl_mem clImage = clCreateFromGLTexture(context, CL_MEM_WRITE_ONLY, gl::TEXTURE_2D, 0, texture.texId(), &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLTexture failed");

    // Source side: read access to the UMat's backing CL buffer.
    cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);

    // GL must release the texture to CL before the copy, and take it back after.
    cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
    status = clEnqueueAcquireGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");
    size_t offset = 0; // TODO
    size_t dst_origin[3] = {0, 0, 0};
    size_t region[3] = { (size_t)u.cols, (size_t)u.rows, 1};
    status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed");
    status = clEnqueueReleaseGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");

    status = clFinish(q); // TODO Use events
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    status = clReleaseMemObject(clImage); // TODO RAII
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
#endif
}
|
|
|
|
void convertFromGLTexture2D(const Texture2D& texture, OutputArray dst)
|
|
{
|
|
CV_UNUSED(texture); CV_UNUSED(dst);
|
|
#if !defined(HAVE_OPENGL)
|
|
NO_OPENGL_SUPPORT_ERROR;
|
|
#elif !defined(HAVE_OPENCL)
|
|
NO_OPENCL_SUPPORT_ERROR;
|
|
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
|
|
NO_OPENCL_SHARING_ERROR;
|
|
#else
|
|
// check texture format
|
|
const int dtype = CV_8UC4;
|
|
CV_Assert(texture.format() == Texture2D::RGBA);
|
|
|
|
int textureType = dtype;
|
|
CV_Assert(textureType >= 0);
|
|
|
|
using namespace cv::ocl;
|
|
Context& ctx = Context::getDefault();
|
|
cl_context context = (cl_context)ctx.ptr();
|
|
|
|
// TODO Need to specify ACCESS_WRITE here somehow to prevent useless data copying!
|
|
dst.create(texture.size(), textureType);
|
|
UMat u = dst.getUMat();
|
|
|
|
// TODO Add support for roi
|
|
CV_Assert(u.offset == 0);
|
|
CV_Assert(u.isContinuous());
|
|
|
|
cl_int status = 0;
|
|
cl_mem clImage = clCreateFromGLTexture(context, CL_MEM_READ_ONLY, gl::TEXTURE_2D, 0, texture.texId(), &status);
|
|
if (status != CL_SUCCESS)
|
|
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLTexture failed");
|
|
|
|
cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);
|
|
|
|
cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
|
|
status = clEnqueueAcquireGLObjects(q, 1, &clImage, 0, NULL, NULL);
|
|
if (status != CL_SUCCESS)
|
|
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");
|
|
size_t offset = 0; // TODO
|
|
size_t src_origin[3] = {0, 0, 0};
|
|
size_t region[3] = { (size_t)u.cols, (size_t)u.rows, 1};
|
|
status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL);
|
|
if (status != CL_SUCCESS)
|
|
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed");
|
|
status = clEnqueueReleaseGLObjects(q, 1, &clImage, 0, NULL, NULL);
|
|
if (status != CL_SUCCESS)
|
|
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");
|
|
|
|
status = clFinish(q); // TODO Use events
|
|
if (status != CL_SUCCESS)
|
|
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");
|
|
|
|
status = clReleaseMemObject(clImage); // TODO RAII
|
|
if (status != CL_SUCCESS)
|
|
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
|
|
#endif
|
|
}
|
|
|
|
//void mapGLBuffer(const Buffer& buffer, UMat& dst, int accessFlags)
|
|
UMat mapGLBuffer(const Buffer& buffer, int accessFlags)
|
|
{
|
|
CV_UNUSED(buffer); CV_UNUSED(accessFlags);
|
|
#if !defined(HAVE_OPENGL)
|
|
NO_OPENGL_SUPPORT_ERROR;
|
|
#elif !defined(HAVE_OPENCL)
|
|
NO_OPENCL_SUPPORT_ERROR;
|
|
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
|
|
NO_OPENCL_SHARING_ERROR;
|
|
#else
|
|
using namespace cv::ocl;
|
|
Context& ctx = Context::getDefault();
|
|
cl_context context = (cl_context)ctx.ptr();
|
|
cl_command_queue clQueue = (cl_command_queue)Queue::getDefault().ptr();
|
|
|
|
int clAccessFlags = 0;
|
|
switch (accessFlags & (ACCESS_READ|ACCESS_WRITE))
|
|
{
|
|
default:
|
|
case ACCESS_READ|ACCESS_WRITE:
|
|
clAccessFlags = CL_MEM_READ_WRITE;
|
|
break;
|
|
case ACCESS_READ:
|
|
clAccessFlags = CL_MEM_READ_ONLY;
|
|
break;
|
|
case ACCESS_WRITE:
|
|
clAccessFlags = CL_MEM_WRITE_ONLY;
|
|
break;
|
|
}
|
|
|
|
cl_int status = 0;
|
|
cl_mem clBuffer = clCreateFromGLBuffer(context, clAccessFlags, buffer.bufId(), &status);
|
|
if (status != CL_SUCCESS)
|
|
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLBuffer failed");
|
|
|
|
gl::Finish();
|
|
|
|
status = clEnqueueAcquireGLObjects(clQueue, 1, &clBuffer, 0, NULL, NULL);
|
|
if (status != CL_SUCCESS)
|
|
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");
|
|
|
|
size_t step = buffer.cols() * buffer.elemSize();
|
|
int rows = buffer.rows();
|
|
int cols = buffer.cols();
|
|
int type = buffer.type();
|
|
|
|
UMat u;
|
|
convertFromBuffer(clBuffer, step, rows, cols, type, u);
|
|
return u;
|
|
#endif
|
|
}
|
|
|
|
// Returns a UMat obtained from mapGLBuffer() to OpenGL: releases the UMat
// wrapper, hands the CL object back to GL, and frees the CL handle.
void unmapGLBuffer(UMat& u)
{
    CV_UNUSED(u);
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
    NO_OPENCL_SHARING_ERROR;
#else
    using namespace cv::ocl;
    cl_command_queue queue = (cl_command_queue)Queue::getDefault().ptr();

    // Grab the underlying CL buffer before dropping the UMat wrapper.
    cl_mem mem = (cl_mem)u.handle(ACCESS_READ);

    u.release();

    cl_int err = clEnqueueReleaseGLObjects(queue, 1, &mem, 0, NULL, NULL);
    if (err != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");

    // Ensure the release is complete before GL touches the buffer again.
    err = clFinish(queue);
    if (err != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    err = clReleaseMemObject(mem);
    if (err != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
#endif
}
|
|
|
|
}} // namespace cv::ogl
|