Mirror of https://github.com/opencv/opencv.git (synced 2025-06-07 09:25:45 +08:00)

Support Python binding for CUDA functionalities

This commit is contained in:
parent 053259fd92
commit 293facbae7
@@ -102,7 +102,7 @@ streams.
 @sa Mat
 */
-class CV_EXPORTS GpuMat
+class CV_EXPORTS_W GpuMat
 {
 public:
     class CV_EXPORTS Allocator
@@ -120,15 +120,15 @@ public:
     static void setDefaultAllocator(Allocator* allocator);

     //! default constructor
-    explicit GpuMat(Allocator* allocator = defaultAllocator());
+    CV_WRAP explicit GpuMat(GpuMat::Allocator* allocator = GpuMat::defaultAllocator());

     //! constructs GpuMat of the specified size and type
-    GpuMat(int rows, int cols, int type, Allocator* allocator = defaultAllocator());
-    GpuMat(Size size, int type, Allocator* allocator = defaultAllocator());
+    CV_WRAP GpuMat(int rows, int cols, int type, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
+    CV_WRAP GpuMat(Size size, int type, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());

     //! constucts GpuMat and fills it with the specified value _s
-    GpuMat(int rows, int cols, int type, Scalar s, Allocator* allocator = defaultAllocator());
-    GpuMat(Size size, int type, Scalar s, Allocator* allocator = defaultAllocator());
+    CV_WRAP GpuMat(int rows, int cols, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
+    CV_WRAP GpuMat(Size size, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());

     //! copy constructor
     GpuMat(const GpuMat& m);
@@ -142,7 +142,7 @@ public:
     GpuMat(const GpuMat& m, Rect roi);

     //! builds GpuMat from host memory (Blocking call)
-    explicit GpuMat(InputArray arr, Allocator* allocator = defaultAllocator());
+    CV_WRAP explicit GpuMat(InputArray arr, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());

     //! destructor - calls release()
     ~GpuMat();
@@ -151,8 +151,8 @@ public:
     GpuMat& operator =(const GpuMat& m);

     //! allocates new GpuMat data unless the GpuMat already has specified size and type
-    void create(int rows, int cols, int type);
-    void create(Size size, int type);
+    CV_WRAP void create(int rows, int cols, int type);
+    CV_WRAP void create(Size size, int type);

     //! decreases reference counter, deallocate the data when reference counter reaches 0
     void release();
@@ -165,7 +165,7 @@ public:
     This function copies data from host memory to device memory. As being a blocking call, it is
     guaranteed that the copy operation is finished when this function returns.
     */
-    void upload(InputArray arr);
+    CV_WRAP void upload(InputArray arr);

     /** @brief Performs data upload to GpuMat (Non-Blocking call)

@@ -175,14 +175,14 @@ public:
     The copy operation may be overlapped with operations in other non-default streams if \p stream is
     not the default stream and \p dst is HostMem allocated with HostMem::PAGE_LOCKED option.
     */
-    void upload(InputArray arr, Stream& stream);
+    CV_WRAP void upload(InputArray arr, Stream& stream);

     /** @brief Performs data download from GpuMat (Blocking call)

     This function copies data from device memory to host memory. As being a blocking call, it is
     guaranteed that the copy operation is finished when this function returns.
     */
-    void download(OutputArray dst) const;
+    CV_WRAP void download(OutputArray dst) const;

     /** @brief Performs data download from GpuMat (Non-Blocking call)

@@ -192,22 +192,22 @@ public:
     The copy operation may be overlapped with operations in other non-default streams if \p stream is
     not the default stream and \p dst is HostMem allocated with HostMem::PAGE_LOCKED option.
     */
-    void download(OutputArray dst, Stream& stream) const;
+    CV_WRAP void download(OutputArray dst, Stream& stream) const;

     //! returns deep copy of the GpuMat, i.e. the data is copied
     GpuMat clone() const;

     //! copies the GpuMat content to device memory (Blocking call)
-    void copyTo(OutputArray dst) const;
+    CV_WRAP void copyTo(OutputArray dst) const;

     //! copies the GpuMat content to device memory (Non-Blocking call)
-    void copyTo(OutputArray dst, Stream& stream) const;
+    CV_WRAP void copyTo(OutputArray dst, Stream& stream) const;

     //! copies those GpuMat elements to "m" that are marked with non-zero mask elements (Blocking call)
-    void copyTo(OutputArray dst, InputArray mask) const;
+    CV_WRAP void copyTo(OutputArray dst, InputArray mask) const;

     //! copies those GpuMat elements to "m" that are marked with non-zero mask elements (Non-Blocking call)
-    void copyTo(OutputArray dst, InputArray mask, Stream& stream) const;
+    CV_WRAP void copyTo(OutputArray dst, InputArray mask, Stream& stream) const;

     //! sets some of the GpuMat elements to s (Blocking call)
     GpuMat& setTo(Scalar s);
@@ -222,19 +222,19 @@ public:
     GpuMat& setTo(Scalar s, InputArray mask, Stream& stream);

     //! converts GpuMat to another datatype (Blocking call)
-    void convertTo(OutputArray dst, int rtype) const;
+    CV_WRAP void convertTo(OutputArray dst, int rtype) const;

     //! converts GpuMat to another datatype (Non-Blocking call)
-    void convertTo(OutputArray dst, int rtype, Stream& stream) const;
+    CV_WRAP void convertTo(OutputArray dst, int rtype, Stream& stream) const;

     //! converts GpuMat to another datatype with scaling (Blocking call)
-    void convertTo(OutputArray dst, int rtype, double alpha, double beta = 0.0) const;
+    CV_WRAP void convertTo(OutputArray dst, int rtype, double alpha, double beta = 0.0) const;

     //! converts GpuMat to another datatype with scaling (Non-Blocking call)
-    void convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const;
+    CV_WRAP void convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const;

     //! converts GpuMat to another datatype with scaling (Non-Blocking call)
-    void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream& stream) const;
+    CV_WRAP void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream& stream) const;

     void assignTo(GpuMat& m, int type=-1) const;

@@ -741,7 +741,7 @@ Use this function before any other CUDA functions calls. If OpenCV is compiled w
 this function returns 0. If the CUDA driver is not installed, or is incompatible, this function
 returns -1.
 */
-CV_EXPORTS int getCudaEnabledDeviceCount();
+CV_EXPORTS_W int getCudaEnabledDeviceCount();

 /** @brief Sets a device and initializes it for the current thread.

@@ -749,18 +749,18 @@ CV_EXPORTS int getCudaEnabledDeviceCount();

 If the call of this function is omitted, a default device is initialized at the fist CUDA usage.
 */
-CV_EXPORTS void setDevice(int device);
+CV_EXPORTS_W void setDevice(int device);

 /** @brief Returns the current device index set by cuda::setDevice or initialized by default.
 */
-CV_EXPORTS int getDevice();
+CV_EXPORTS_W int getDevice();

 /** @brief Explicitly destroys and cleans up all resources associated with the current device in the current
 process.

 Any subsequent API call to this device will reinitialize the device.
 */
-CV_EXPORTS void resetDevice();
+CV_EXPORTS_W void resetDevice();

 /** @brief Enumeration providing CUDA computing features.
 */
@@ -1027,8 +1027,8 @@ private:
     int device_id_;
 };

-CV_EXPORTS void printCudaDeviceInfo(int device);
-CV_EXPORTS void printShortCudaDeviceInfo(int device);
+CV_EXPORTS_W void printCudaDeviceInfo(int device);
+CV_EXPORTS_W void printShortCudaDeviceInfo(int device);

 /** @brief Converts an array to half precision floating number.

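With GpuMat and the device-management functions marked CV_EXPORTS_W / CV_WRAP, the binding generator exposes them under cv.cuda. A minimal sketch of the resulting Python usage, assuming a CUDA-enabled build of OpenCV (the device index and array shape are illustrative):

    import cv2 as cv
    import numpy as np

    # Guard on a CUDA device being present, as the new test suite does.
    if cv.cuda.getCudaEnabledDeviceCount() > 0:
        cv.cuda.setDevice(0)                   # select the first device (illustrative)
        host = (np.random.random((480, 640, 3)) * 255).astype(np.uint8)

        gpu = cv.cuda_GpuMat()                 # wrapped default constructor
        gpu.upload(host)                       # blocking host -> device copy
        roundtrip = gpu.download()             # blocking device -> host copy
        assert np.array_equal(roundtrip, host)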
modules/core/misc/python/pyopencv_core.hpp (new file, 54 lines)
@@ -0,0 +1,54 @@
+#ifdef HAVE_OPENCV_CORE
+
+#include "opencv2/core/cuda.hpp"
+
+typedef std::vector<cuda::GpuMat> vector_GpuMat;
+typedef cuda::GpuMat::Allocator GpuMat_Allocator;
+
+template<> bool pyopencv_to(PyObject* o, Ptr<cuda::GpuMat>& m, const char* name);
+template<> PyObject* pyopencv_from(const Ptr<cuda::GpuMat>& m);
+
+template<>
+bool pyopencv_to(PyObject* o, cuda::GpuMat& m, const char* name)
+{
+    if (!o || o == Py_None)
+        return true;
+    Ptr<cuda::GpuMat> mPtr(new cuda::GpuMat());
+
+    if (!pyopencv_to(o, mPtr, name)) return false;
+    m = *mPtr;
+    return true;
+}
+
+template<>
+PyObject* pyopencv_from(const cuda::GpuMat& m)
+{
+    Ptr<cuda::GpuMat> mPtr(new cuda::GpuMat());
+
+    *mPtr = m;
+    return pyopencv_from(mPtr);
+}
+
+template<>
+bool pyopencv_to(PyObject *o, cuda::GpuMat::Allocator* &allocator, const char *name)
+{
+    (void)name;
+    if (!o || o == Py_None)
+        return true;
+
+    failmsg("Python binding for cv::cuda::GpuMat::Allocator is not implemented yet.");
+    return false;
+}
+
+template<>
+bool pyopencv_to(PyObject *o, cuda::Stream &stream, const char *name)
+{
+    (void)name;
+    if (!o || o == Py_None)
+        return true;
+
+    failmsg("Python binding for cv::cuda::Stream is not implemented yet.");
+    return false;
+}
+
+#endif
@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Background Segmentation")

 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)

-ocv_define_module(cudabgsegm opencv_video)
+ocv_define_module(cudabgsegm opencv_video WRAP python)

@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Feature Detection and Description")

 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4100 /wd4324 /wd4512 /wd4515 -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter -Wshadow)

-ocv_define_module(cudafeatures2d opencv_features2d opencv_cudafilters opencv_cudawarping)
+ocv_define_module(cudafeatures2d opencv_features2d opencv_cudafilters opencv_cudawarping WRAP python)

@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Image Filtering")

 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)

-ocv_define_module(cudafilters opencv_imgproc opencv_cudaarithm)
+ocv_define_module(cudafilters opencv_imgproc opencv_cudaarithm WRAP python)
@@ -72,7 +72,7 @@ namespace cv { namespace cuda {

 /** @brief Common interface for all CUDA filters :
 */
-class CV_EXPORTS Filter : public Algorithm
+class CV_EXPORTS_W Filter : public Algorithm
 {
 public:
     /** @brief Applies the specified filter to the image.
@@ -81,7 +81,7 @@ public:
     @param dst Output image.
     @param stream Stream for the asynchronous version.
     */
-    virtual void apply(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
+    CV_WRAP virtual void apply(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
 };

 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -99,7 +99,7 @@ center.

 @sa boxFilter
 */
-CV_EXPORTS Ptr<Filter> createBoxFilter(int srcType, int dstType, Size ksize, Point anchor = Point(-1,-1),
+CV_EXPORTS_W Ptr<Filter> createBoxFilter(int srcType, int dstType, Size ksize, Point anchor = Point(-1, -1),
     int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));

 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -117,7 +117,7 @@ center.

 @sa filter2D
 */
-CV_EXPORTS Ptr<Filter> createLinearFilter(int srcType, int dstType, InputArray kernel, Point anchor = Point(-1,-1),
+CV_EXPORTS_W Ptr<Filter> createLinearFilter(int srcType, int dstType, InputArray kernel, Point anchor = Point(-1, -1),
     int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));

 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -136,7 +136,7 @@ applied (see getDerivKernels ).

 @sa Laplacian
 */
-CV_EXPORTS Ptr<Filter> createLaplacianFilter(int srcType, int dstType, int ksize = 1, double scale = 1,
+CV_EXPORTS_W Ptr<Filter> createLaplacianFilter(int srcType, int dstType, int ksize = 1, double scale = 1,
     int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));

 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -156,7 +156,7 @@ borderInterpolate.

 @sa sepFilter2D
 */
-CV_EXPORTS Ptr<Filter> createSeparableLinearFilter(int srcType, int dstType, InputArray rowKernel, InputArray columnKernel,
+CV_EXPORTS_W Ptr<Filter> createSeparableLinearFilter(int srcType, int dstType, InputArray rowKernel, InputArray columnKernel,
     Point anchor = Point(-1,-1), int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);

 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -177,7 +177,7 @@ applied. For details, see getDerivKernels .
 borderInterpolate.
 @param columnBorderMode Pixel extrapolation method in the horizontal direction.
 */
-CV_EXPORTS Ptr<Filter> createDerivFilter(int srcType, int dstType, int dx, int dy,
+CV_EXPORTS_W Ptr<Filter> createDerivFilter(int srcType, int dstType, int dx, int dy,
     int ksize, bool normalize = false, double scale = 1,
     int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);

@@ -196,7 +196,7 @@ borderInterpolate.

 @sa Sobel
 */
-CV_EXPORTS Ptr<Filter> createSobelFilter(int srcType, int dstType, int dx, int dy, int ksize = 3,
+CV_EXPORTS_W Ptr<Filter> createSobelFilter(int srcType, int dstType, int dx, int dy, int ksize = 3,
     double scale = 1, int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);

 /** @brief Creates a vertical or horizontal Scharr operator.
@@ -213,7 +213,7 @@ borderInterpolate.

 @sa Scharr
 */
-CV_EXPORTS Ptr<Filter> createScharrFilter(int srcType, int dstType, int dx, int dy,
+CV_EXPORTS_W Ptr<Filter> createScharrFilter(int srcType, int dstType, int dx, int dy,
     double scale = 1, int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);

 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -233,7 +233,7 @@ borderInterpolate.

 @sa GaussianBlur
 */
-CV_EXPORTS Ptr<Filter> createGaussianFilter(int srcType, int dstType, Size ksize,
+CV_EXPORTS_W Ptr<Filter> createGaussianFilter(int srcType, int dstType, Size ksize,
     double sigma1, double sigma2 = 0,
     int rowBorderMode = BORDER_DEFAULT, int columnBorderMode = -1);

@@ -258,7 +258,7 @@ is at the center.

 @sa morphologyEx
 */
-CV_EXPORTS Ptr<Filter> createMorphologyFilter(int op, int srcType, InputArray kernel, Point anchor = Point(-1, -1), int iterations = 1);
+CV_EXPORTS_W Ptr<Filter> createMorphologyFilter(int op, int srcType, InputArray kernel, Point anchor = Point(-1, -1), int iterations = 1);

 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // Image Rank Filter
@@ -271,7 +271,7 @@ CV_EXPORTS Ptr<Filter> createMorphologyFilter(int op, int srcType, InputArray ke
 @param borderMode Pixel extrapolation method. For details, see borderInterpolate .
 @param borderVal Default border value.
 */
-CV_EXPORTS Ptr<Filter> createBoxMaxFilter(int srcType, Size ksize,
+CV_EXPORTS_W Ptr<Filter> createBoxMaxFilter(int srcType, Size ksize,
     Point anchor = Point(-1, -1),
     int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));

@@ -283,7 +283,7 @@ CV_EXPORTS Ptr<Filter> createBoxMaxFilter(int srcType, Size ksize,
 @param borderMode Pixel extrapolation method. For details, see borderInterpolate .
 @param borderVal Default border value.
 */
-CV_EXPORTS Ptr<Filter> createBoxMinFilter(int srcType, Size ksize,
+CV_EXPORTS_W Ptr<Filter> createBoxMinFilter(int srcType, Size ksize,
     Point anchor = Point(-1, -1),
     int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));

@@ -299,7 +299,7 @@ CV_EXPORTS Ptr<Filter> createBoxMinFilter(int srcType, Size ksize,
 @param borderMode Pixel extrapolation method. For details, see borderInterpolate .
 @param borderVal Default border value.
 */
-CV_EXPORTS Ptr<Filter> createRowSumFilter(int srcType, int dstType, int ksize, int anchor = -1, int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
+CV_EXPORTS_W Ptr<Filter> createRowSumFilter(int srcType, int dstType, int ksize, int anchor = -1, int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));

 /** @brief Creates a vertical 1D box filter.

@@ -310,7 +310,7 @@ CV_EXPORTS Ptr<Filter> createRowSumFilter(int srcType, int dstType, int ksize, i
 @param borderMode Pixel extrapolation method. For details, see borderInterpolate .
 @param borderVal Default border value.
 */
-CV_EXPORTS Ptr<Filter> createColumnSumFilter(int srcType, int dstType, int ksize, int anchor = -1, int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));
+CV_EXPORTS_W Ptr<Filter> createColumnSumFilter(int srcType, int dstType, int ksize, int anchor = -1, int borderMode = BORDER_DEFAULT, Scalar borderVal = Scalar::all(0));

 //! @}

@@ -324,7 +324,7 @@ CV_EXPORTS Ptr<Filter> createColumnSumFilter(int srcType, int dstType, int ksize

 Outputs an image that has been filtered using median-filtering formulation.
 */
-CV_EXPORTS Ptr<Filter> createMedianFilter(int srcType, int windowSize, int partition=128);
+CV_EXPORTS_W Ptr<Filter> createMedianFilter(int srcType, int windowSize, int partition = 128);

 }} // namespace cv { namespace cuda {

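On the Python side the wrapped factory functions return Filter objects whose apply method is now callable directly. A minimal sketch, mirroring the Laplacian test added in this commit (the depth and ksize are illustrative):

    import cv2 as cv
    import numpy as np

    img = (np.random.random((200, 200)) * 255).astype(np.uint16)
    gpu = cv.cuda_GpuMat()
    gpu.upload(img)

    # The factory returns a cuda Filter; apply() runs it on the GpuMat.
    lap = cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3)
    gpu_out = lap.apply(gpu)

    assert np.allclose(gpu_out.download(), cv.Laplacian(img, cv.CV_16UC1, ksize=3))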
@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Image Processing")

 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4100 /wd4324 /wd4512 /wd4515 -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter)

-ocv_define_module(cudaimgproc opencv_imgproc OPTIONAL opencv_cudev opencv_cudaarithm opencv_cudafilters)
+ocv_define_module(cudaimgproc opencv_imgproc OPTIONAL opencv_cudev opencv_cudaarithm opencv_cudafilters WRAP python)
@@ -87,7 +87,7 @@ performance.

 @sa cvtColor
 */
-CV_EXPORTS void cvtColor(InputArray src, OutputArray dst, int code, int dcn = 0, Stream& stream = Stream::Null());
+CV_EXPORTS_W void cvtColor(InputArray src, OutputArray dst, int code, int dcn = 0, Stream& stream = Stream::Null());

 enum DemosaicTypes
 {
@@ -133,7 +133,7 @@ The function can do the following transformations:

 @sa cvtColor
 */
-CV_EXPORTS void demosaicing(InputArray src, OutputArray dst, int code, int dcn = -1, Stream& stream = Stream::Null());
+CV_EXPORTS_W void demosaicing(InputArray src, OutputArray dst, int code, int dcn = -1, Stream& stream = Stream::Null());

 /** @brief Exchanges the color channels of an image in-place.

@@ -145,7 +145,7 @@ E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR channel or

 The methods support arbitrary permutations of the original channels, including replication.
 */
-CV_EXPORTS void swapChannels(InputOutputArray image, const int dstOrder[4], Stream& stream = Stream::Null());
+CV_EXPORTS_W void swapChannels(InputOutputArray image, const int dstOrder[4], Stream& stream = Stream::Null());

 /** @brief Routines for correcting image color gamma.

@@ -154,7 +154,7 @@ CV_EXPORTS void swapChannels(InputOutputArray image, const int dstOrder[4], Stre
 @param forward true for forward gamma correction or false for inverse gamma correction.
 @param stream Stream for the asynchronous version.
 */
-CV_EXPORTS void gammaCorrection(InputArray src, OutputArray dst, bool forward = true, Stream& stream = Stream::Null());
+CV_EXPORTS_W void gammaCorrection(InputArray src, OutputArray dst, bool forward = true, Stream& stream = Stream::Null());

 enum AlphaCompTypes { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, ALPHA_PLUS, ALPHA_OVER_PREMUL, ALPHA_IN_PREMUL, ALPHA_OUT_PREMUL,
                       ALPHA_ATOP_PREMUL, ALPHA_XOR_PREMUL, ALPHA_PLUS_PREMUL, ALPHA_PREMUL};
@@ -184,7 +184,7 @@ enum AlphaCompTypes { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, AL
 - An example demonstrating the use of alphaComp can be found at
   opencv_source_code/samples/gpu/alpha_comp.cpp
 */
-CV_EXPORTS void alphaComp(InputArray img1, InputArray img2, OutputArray dst, int alpha_op, Stream& stream = Stream::Null());
+CV_EXPORTS_W void alphaComp(InputArray img1, InputArray img2, OutputArray dst, int alpha_op, Stream& stream = Stream::Null());

 //! @} cudaimgproc_color

@@ -199,7 +199,7 @@ CV_EXPORTS void alphaComp(InputArray img1, InputArray img2, OutputArray dst, int
 @param hist Destination histogram with one row, 256 columns, and the CV_32SC1 type.
 @param stream Stream for the asynchronous version.
 */
-CV_EXPORTS void calcHist(InputArray src, OutputArray hist, Stream& stream = Stream::Null());
+CV_EXPORTS_W void calcHist(InputArray src, OutputArray hist, Stream& stream = Stream::Null());

 /** @brief Calculates histogram for one channel 8-bit image confined in given mask.

@@ -208,7 +208,7 @@ CV_EXPORTS void calcHist(InputArray src, OutputArray hist, Stream& stream = Stre
 @param mask A mask image same size as src and of type CV_8UC1.
 @param stream Stream for the asynchronous version.
 */
-CV_EXPORTS void calcHist(InputArray src, InputArray mask, OutputArray hist, Stream& stream = Stream::Null());
+CV_EXPORTS_W void calcHist(InputArray src, InputArray mask, OutputArray hist, Stream& stream = Stream::Null());

 /** @brief Equalizes the histogram of a grayscale image.

@@ -218,11 +218,11 @@ CV_EXPORTS void calcHist(InputArray src, InputArray mask, OutputArray hist, Stre

 @sa equalizeHist
 */
-CV_EXPORTS void equalizeHist(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
+CV_EXPORTS_W void equalizeHist(InputArray src, OutputArray dst, Stream& stream = Stream::Null());

 /** @brief Base class for Contrast Limited Adaptive Histogram Equalization. :
 */
-class CV_EXPORTS CLAHE : public cv::CLAHE
+class CV_EXPORTS_W CLAHE : public cv::CLAHE
 {
 public:
     using cv::CLAHE::apply;
@@ -232,7 +232,7 @@ public:
     @param dst Destination image.
     @param stream Stream for the asynchronous version.
     */
-    virtual void apply(InputArray src, OutputArray dst, Stream& stream) = 0;
+    CV_WRAP virtual void apply(InputArray src, OutputArray dst, Stream& stream) = 0;
 };

 /** @brief Creates implementation for cuda::CLAHE .
@@ -241,7 +241,7 @@ public:
 @param tileGridSize Size of grid for histogram equalization. Input image will be divided into
 equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
 */
-CV_EXPORTS Ptr<cuda::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
+CV_EXPORTS_W Ptr<cuda::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));

 /** @brief Computes levels with even distribution.

@@ -251,7 +251,7 @@ CV_EXPORTS Ptr<cuda::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSi
 @param upperLevel Upper boundary value of the greatest level.
 @param stream Stream for the asynchronous version.
 */
-CV_EXPORTS void evenLevels(OutputArray levels, int nLevels, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
+CV_EXPORTS_W void evenLevels(OutputArray levels, int nLevels, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());

 /** @brief Calculates a histogram with evenly distributed bins.

@@ -263,7 +263,7 @@ a four-channel image, all channels are processed separately.
 @param upperLevel Upper boundary of highest-level bin.
 @param stream Stream for the asynchronous version.
 */
-CV_EXPORTS void histEven(InputArray src, OutputArray hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
+CV_EXPORTS_W void histEven(InputArray src, OutputArray hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
 /** @overload */
 CV_EXPORTS void histEven(InputArray src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());

@@ -275,7 +275,7 @@ For a four-channel image, all channels are processed separately.
 @param levels Number of levels in the histogram.
 @param stream Stream for the asynchronous version.
 */
-CV_EXPORTS void histRange(InputArray src, OutputArray hist, InputArray levels, Stream& stream = Stream::Null());
+CV_EXPORTS_W void histRange(InputArray src, OutputArray hist, InputArray levels, Stream& stream = Stream::Null());
 /** @overload */
 CV_EXPORTS void histRange(InputArray src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null());

@@ -285,7 +285,7 @@ CV_EXPORTS void histRange(InputArray src, GpuMat hist[4], const GpuMat levels[4]

 /** @brief Base class for Canny Edge Detector. :
 */
-class CV_EXPORTS CannyEdgeDetector : public Algorithm
+class CV_EXPORTS_W CannyEdgeDetector : public Algorithm
 {
 public:
     /** @brief Finds edges in an image using the @cite Canny86 algorithm.
@@ -294,26 +294,26 @@ public:
     @param edges Output edge map. It has the same size and type as image.
     @param stream Stream for the asynchronous version.
     */
-    virtual void detect(InputArray image, OutputArray edges, Stream& stream = Stream::Null()) = 0;
+    CV_WRAP virtual void detect(InputArray image, OutputArray edges, Stream& stream = Stream::Null()) = 0;
     /** @overload
     @param dx First derivative of image in the vertical direction. Support only CV_32S type.
    @param dy First derivative of image in the horizontal direction. Support only CV_32S type.
     @param edges Output edge map. It has the same size and type as image.
     @param stream Stream for the asynchronous version.
     */
-    virtual void detect(InputArray dx, InputArray dy, OutputArray edges, Stream& stream = Stream::Null()) = 0;
+    CV_WRAP virtual void detect(InputArray dx, InputArray dy, OutputArray edges, Stream& stream = Stream::Null()) = 0;

-    virtual void setLowThreshold(double low_thresh) = 0;
-    virtual double getLowThreshold() const = 0;
+    CV_WRAP virtual void setLowThreshold(double low_thresh) = 0;
+    CV_WRAP virtual double getLowThreshold() const = 0;

-    virtual void setHighThreshold(double high_thresh) = 0;
-    virtual double getHighThreshold() const = 0;
+    CV_WRAP virtual void setHighThreshold(double high_thresh) = 0;
+    CV_WRAP virtual double getHighThreshold() const = 0;

-    virtual void setAppertureSize(int apperture_size) = 0;
-    virtual int getAppertureSize() const = 0;
+    CV_WRAP virtual void setAppertureSize(int apperture_size) = 0;
+    CV_WRAP virtual int getAppertureSize() const = 0;

-    virtual void setL2Gradient(bool L2gradient) = 0;
-    virtual bool getL2Gradient() const = 0;
+    CV_WRAP virtual void setL2Gradient(bool L2gradient) = 0;
+    CV_WRAP virtual bool getL2Gradient() const = 0;
 };

 /** @brief Creates implementation for cuda::CannyEdgeDetector .
@@ -326,7 +326,7 @@ public:
 L2gradient=true ), or a faster default \f$L_1\f$ norm \f$=|dI/dx|+|dI/dy|\f$ is enough ( L2gradient=false
 ).
 */
-CV_EXPORTS Ptr<CannyEdgeDetector> createCannyEdgeDetector(double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);
+CV_EXPORTS_W Ptr<CannyEdgeDetector> createCannyEdgeDetector(double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);

 /////////////////////////// Hough Transform ////////////////////////////

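Because these declarations live in the cv::cuda namespace and take Mat arguments, the header parser (see the hdr_parser.py change further below) also emits GpuMat-based overloads for them, so functions such as cvtColor accept device matrices directly from Python. A minimal sketch, following the cvtColor test added in this commit:

    import cv2 as cv
    import numpy as np

    bgr = (np.random.random((200, 200, 3)) * 255).astype(np.uint8)
    gpu = cv.cuda_GpuMat()
    gpu.upload(bgr)

    # cv.cuda.cvtColor takes a GpuMat and returns a new GpuMat.
    gpu_hsv = cv.cuda.cvtColor(gpu, cv.COLOR_BGR2HSV)

    assert np.allclose(gpu_hsv.download(), cv.cvtColor(bgr, cv.COLOR_BGR2HSV))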
@@ -816,10 +816,10 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
     float scale = 1.0f;

     //cuda arrays for frames
-    std::auto_ptr<FloatVector> pI0(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
+    std::unique_ptr<FloatVector> pI0(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
     ncvAssertReturn(pI0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

-    std::auto_ptr<FloatVector> pI1(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
+    std::unique_ptr<FloatVector> pI1(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
     ncvAssertReturn(pI1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

     if (!kSkipProcessing)
@@ -862,10 +862,10 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,

         Ncv32u prev_level_pitch = alignUp(prev_level_width, kStrideAlignmentFloat) * sizeof(float);

-        std::auto_ptr<FloatVector> level_frame0(new FloatVector(gpu_mem_allocator, buffer_size));
+        std::unique_ptr<FloatVector> level_frame0(new FloatVector(gpu_mem_allocator, buffer_size));
         ncvAssertReturn(level_frame0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

-        std::auto_ptr<FloatVector> level_frame1(new FloatVector(gpu_mem_allocator, buffer_size));
+        std::unique_ptr<FloatVector> level_frame1(new FloatVector(gpu_mem_allocator, buffer_size));
         ncvAssertReturn(level_frame1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

         if (!kSkipProcessing)

@@ -151,8 +151,8 @@ public:
 protected:

     cudaDeviceProp devProp;
-    std::auto_ptr<INCVMemAllocator> allocatorGPU;
-    std::auto_ptr<INCVMemAllocator> allocatorCPU;
+    std::unique_ptr<INCVMemAllocator> allocatorGPU;
+    std::unique_ptr<INCVMemAllocator> allocatorCPU;

 private:

@@ -184,8 +184,8 @@ private:
     }

     NcvBool bInit;
-    std::auto_ptr< INCVMemAllocator > allocatorCPU;
-    std::auto_ptr< NCVMatrixAlloc<T> > data;
+    std::unique_ptr< INCVMemAllocator > allocatorCPU;
+    std::unique_ptr< NCVMatrixAlloc<T> > data;
     Ncv32u dataWidth;
     Ncv32u dataHeight;
 };

@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Object Detection")

 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)

-ocv_define_module(cudaobjdetect opencv_objdetect opencv_cudaarithm opencv_cudawarping OPTIONAL opencv_cudalegacy)
+ocv_define_module(cudaobjdetect opencv_objdetect opencv_cudaarithm opencv_cudawarping OPTIONAL opencv_cudalegacy WRAP python)

@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Optical Flow")

 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)

-ocv_define_module(cudaoptflow opencv_video opencv_cudaarithm opencv_cudawarping opencv_cudaimgproc OPTIONAL opencv_cudalegacy)
+ocv_define_module(cudaoptflow opencv_video opencv_cudaarithm opencv_cudawarping opencv_cudaimgproc OPTIONAL opencv_cudalegacy WRAP python)

@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Stereo Correspondence")

 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)

-ocv_define_module(cudastereo opencv_calib3d)
+ocv_define_module(cudastereo opencv_calib3d WRAP python)

@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Image Warping")

 ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)

-ocv_define_module(cudawarping opencv_core opencv_imgproc OPTIONAL opencv_cudev)
+ocv_define_module(cudawarping opencv_core opencv_imgproc OPTIONAL opencv_cudev WRAP python)
@@ -32,9 +32,7 @@ endforeach(m)

 # header blacklist
 ocv_list_filterout(opencv_hdrs "modules/.*\\\\.h$")
-ocv_list_filterout(opencv_hdrs "modules/core/.*/cuda")
-ocv_list_filterout(opencv_hdrs "modules/cuda.*")
-ocv_list_filterout(opencv_hdrs "modules/cudev")
+ocv_list_filterout(opencv_hdrs "modules/core/.*/cuda/")
 ocv_list_filterout(opencv_hdrs "modules/core/.*/hal/")
 ocv_list_filterout(opencv_hdrs "modules/core/.*/opencl/")
 ocv_list_filterout(opencv_hdrs "modules/.+/utils/.*")
@@ -43,6 +41,10 @@ ocv_list_filterout(opencv_hdrs "modules/.*_inl\\\\.h*")
 ocv_list_filterout(opencv_hdrs "modules/.*\\\\.details\\\\.h*")
 ocv_list_filterout(opencv_hdrs "modules/.*\\\\.private\\\\.h*")
 ocv_list_filterout(opencv_hdrs "modules/.*/detection_based_tracker\\\\.hpp") # Conditional compilation
+if(NOT HAVE_CUDA)
+  ocv_list_filterout(opencv_hdrs "modules/cuda.*")
+  ocv_list_filterout(opencv_hdrs "modules/cudev")
+endif()

 set(cv2_generated_files
     "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_include.h"
@@ -12,17 +12,19 @@ else:

 ignored_arg_types = ["RNG*"]

+pass_by_val_types = ["Point*", "Point2f*", "Rect*", "String*", "double*", "float*", "int*"]
+
 gen_template_check_self = Template("""    $cname* _self_ = NULL;
     if(PyObject_TypeCheck(self, &pyopencv_${name}_Type))
         _self_ = ${amp}((pyopencv_${name}_t*)self)->v${get};
-    if (_self_ == NULL)
+    if (!_self_)
         return failmsgp("Incorrect type of self (must be '${name}' or its derivative)");
 """)

 gen_template_check_self_algo = Template("""    $cname* _self_ = NULL;
     if(PyObject_TypeCheck(self, &pyopencv_${name}_Type))
         _self_ = dynamic_cast<$cname*>(${amp}((pyopencv_${name}_t*)self)->v.get());
-    if (_self_ == NULL)
+    if (!_self_)
         return failmsgp("Incorrect type of self (must be '${name}' or its derivative)");
 """)

@@ -77,7 +79,7 @@ template<> PyObject* pyopencv_from(const ${cname}& r)

 template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name)
 {
-    if( src == NULL || src == Py_None )
+    if(!src || src == Py_None)
         return true;
     if(!PyObject_TypeCheck(src, &pyopencv_${name}_Type))
     {
@@ -120,7 +122,7 @@ template<> PyObject* pyopencv_from(const Ptr<${cname}>& r)

 template<> bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name)
 {
-    if( src == NULL || src == Py_None )
+    if(!src || src == Py_None)
         return true;
     if(!PyObject_TypeCheck(src, &pyopencv_${name}_Type))
     {
@@ -192,7 +194,7 @@ gen_template_get_prop_algo = Template("""
 static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure)
 {
     $cname* _self_ = dynamic_cast<$cname*>(p->v.get());
-    if (_self_ == NULL)
+    if (!_self_)
         return failmsgp("Incorrect type of object (must be '${name}' or its derivative)");
     return pyopencv_from(_self_${access}${member});
 }
@@ -201,7 +203,7 @@ static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *clo
 gen_template_set_prop = Template("""
 static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure)
 {
-    if (value == NULL)
+    if (!value)
     {
         PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
         return -1;
@@ -213,13 +215,13 @@ static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value
 gen_template_set_prop_algo = Template("""
 static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure)
 {
-    if (value == NULL)
+    if (!value)
     {
         PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
         return -1;
     }
     $cname* _self_ = dynamic_cast<$cname*>(p->v.get());
-    if (_self_ == NULL)
+    if (!_self_)
     {
         failmsgp("Incorrect type of object (must be '${name}' or its derivative)");
         return -1;
@@ -402,7 +404,7 @@ class ArgInfo(object):
         self.py_outputarg = False

     def isbig(self):
-        return self.tp == "Mat" or self.tp == "vector_Mat"\
+        return self.tp == "Mat" or self.tp == "vector_Mat" or self.tp == "cuda::GpuMat"\
                or self.tp == "UMat" or self.tp == "vector_UMat" # or self.tp.startswith("vector")

     def crepr(self):
@@ -656,15 +658,12 @@ class FuncInfo(object):
             tp1 = tp = a.tp
             amp = ""
             defval0 = ""
-            if tp.endswith("*"):
+            if tp in pass_by_val_types:
                 tp = tp1 = tp[:-1]
                 amp = "&"
             if tp.endswith("*"):
                 defval0 = "0"
                 tp1 = tp.replace("*", "_ptr")
-            if tp1.endswith("*"):
-                print("Error: type with star: a.tp=%s, tp=%s, tp1=%s" % (a.tp, tp, tp1))
-                sys.exit(-1)

             amapping = simple_argtype_mapping.get(tp, (tp, "O", defval0))
             parse_name = a.name
@@ -686,6 +685,9 @@ class FuncInfo(object):
             if "UMat" in tp:
                 if "Mat" in defval and "UMat" not in defval:
                     defval = defval.replace("Mat", "UMat")
+            if "cuda::GpuMat" in tp:
+                if "Mat" in defval and "GpuMat" not in defval:
+                    defval = defval.replace("Mat", "cuda::GpuMat")
             # "tp arg = tp();" is equivalent to "tp arg;" in the case of complex types
             if defval == tp + "()" and amapping[1] == "O":
                 defval = ""
@@ -754,7 +756,7 @@ class FuncInfo(object):
                 parse_arglist = ", ".join(["&" + all_cargs[argno][1] for aname, argno in v.py_arglist]),
                 code_cvt = " &&\n ".join(code_cvt_list))
             else:
-                code_parse = "if(PyObject_Size(args) == 0 && (kw == NULL || PyObject_Size(kw) == 0))"
+                code_parse = "if(PyObject_Size(args) == 0 && (!kw || PyObject_Size(kw) == 0))"

             if len(v.py_outlist) == 0:
                 code_ret = "Py_RETURN_NONE"
@@ -975,7 +977,7 @@ class PythonWrapperGenerator(object):

     def gen(self, srcfiles, output_path):
         self.clear()
-        self.parser = hdr_parser.CppHeaderParser(generate_umat_decls=True)
+        self.parser = hdr_parser.CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=True)

         # step 1: scan the headers and build more descriptive maps of classes, consts, functions
         for hdr in srcfiles:

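The default-value rewrite added above mirrors the existing UMat handling: when an argument's type is cuda::GpuMat but its default value still mentions Mat, the default is rewritten so the generated wrapper code compiles. A rough sketch of the effect on hypothetical inputs:

    tp = "cuda::GpuMat"
    defval = "Mat()"
    if "cuda::GpuMat" in tp:
        if "Mat" in defval and "GpuMat" not in defval:
            defval = defval.replace("Mat", "cuda::GpuMat")
    # defval is now "cuda::GpuMat()"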
@@ -32,8 +32,9 @@ original_return_type is None if the original_return_type is the same as return_v

 class CppHeaderParser(object):

-    def __init__(self, generate_umat_decls=False):
+    def __init__(self, generate_umat_decls=False, generate_gpumat_decls=False):
         self._generate_umat_decls = generate_umat_decls
+        self._generate_gpumat_decls = generate_gpumat_decls

         self.BLOCK_TYPE = 0
         self.BLOCK_NAME = 1
@@ -379,7 +380,7 @@ class CppHeaderParser(object):
         print(decl_str)
         return decl

-    def parse_func_decl(self, decl_str, use_umat=False, docstring=""):
+    def parse_func_decl(self, decl_str, mat="Mat", docstring=""):
        """
        Parses the function or method declaration in the form:
        [([CV_EXPORTS] <rettype>) | CVAPI(rettype)]
@@ -563,8 +564,6 @@ class CppHeaderParser(object):
                 a = a[:eqpos].strip()
             arg_type, arg_name, modlist, argno = self.parse_arg(a, argno)
             if self.wrap_mode:
-                mat = "UMat" if use_umat else "Mat"
-
                 # TODO: Vectors should contain UMat, but this is not very easy to support and not very needed
                 vector_mat = "vector_{}".format("Mat")
                 vector_mat_template = "vector<{}>".format("Mat")
@@ -639,7 +638,7 @@ class CppHeaderParser(object):
             n = "cv.Algorithm"
         return n

-    def parse_stmt(self, stmt, end_token, use_umat=False, docstring=""):
+    def parse_stmt(self, stmt, end_token, mat="Mat", docstring=""):
        """
        parses the statement (ending with ';' or '}') or a block head (ending with '{')

@@ -731,7 +730,7 @@ class CppHeaderParser(object):
                 # since we filtered off the other places where '(' can normally occur:
                 #   - code blocks
                 #   - function pointer typedef's
-                decl = self.parse_func_decl(stmt, use_umat=use_umat, docstring=docstring)
+                decl = self.parse_func_decl(stmt, mat=mat, docstring=docstring)
                 # we return parse_flag == False to prevent the parser to look inside function/method bodies
                 # (except for tracking the nested blocks)
                 return stmt_type, "", False, decl
@@ -902,14 +901,24 @@ class CppHeaderParser(object):
             else:
                 decls.append(decl)

+            if self._generate_gpumat_decls and "cv.cuda." in decl[0]:
+                # If function takes as one of arguments Mat or vector<Mat> - we want to create the
+                # same declaration working with GpuMat (this is important for T-Api access)
+                args = decl[3]
+                has_mat = len(list(filter(lambda x: x[0] in {"Mat", "vector_Mat"}, args))) > 0
+                if has_mat:
+                    _, _, _, gpumat_decl = self.parse_stmt(stmt, token, mat="cuda::GpuMat", docstring=docstring)
+                    decls.append(gpumat_decl)
+
             if self._generate_umat_decls:
                 # If function takes as one of arguments Mat or vector<Mat> - we want to create the
                 # same declaration working with UMat (this is important for T-Api access)
                 args = decl[3]
                 has_mat = len(list(filter(lambda x: x[0] in {"Mat", "vector_Mat"}, args))) > 0
                 if has_mat:
-                    _, _, _, umat_decl = self.parse_stmt(stmt, token, use_umat=True, docstring=docstring)
+                    _, _, _, umat_decl = self.parse_stmt(stmt, token, mat="UMat", docstring=docstring)
                     decls.append(umat_decl)

         docstring = ""
         if stmt_type == "namespace":
             chunks = [block[1] for block in self.block_stack if block[0] == 'namespace'] + [name]
@@ -952,7 +961,7 @@ class CppHeaderParser(object):
     print()

 if __name__ == '__main__':
-    parser = CppHeaderParser(generate_umat_decls=True)
+    parser = CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=True)
     decls = []
     for hname in opencv_hdr_list:
         decls += parser.parse(hname)

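The practical effect of generate_gpumat_decls is that parsing a cv::cuda header yields an extra declaration per qualifying function, with Mat arguments swapped for cuda::GpuMat, and the generator then emits both overloads. A hypothetical illustration (the header path and function name are examples, not real output of the tool):

    parser = CppHeaderParser(generate_umat_decls=True, generate_gpumat_decls=True)
    decls = parser.parse("opencv2/cudaimgproc.hpp")
    # For a wrapped function such as cv.cuda.cvtColor, decls would then contain both a
    # Mat-based declaration and a second one whose arguments use cuda::GpuMat.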
modules/python/test/test_cuda.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+'''
+CUDA-accelerated Computer Vision functions
+'''
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import numpy as np
+import cv2 as cv
+
+from tests_common import NewOpenCVTests
+
+class cuda_test(NewOpenCVTests):
+    def setUp(self):
+        if not cv.cuda.getCudaEnabledDeviceCount():
+            self.skipTest("No CUDA-capable device is detected")
+
+    def test_cuda_upload_download(self):
+        npMat = (np.random.random((200, 200, 3)) * 255).astype(np.uint8)
+        gpuMat = cv.cuda_GpuMat()
+        gpuMat.upload(npMat)
+
+        self.assertTrue(np.allclose(gpuMat.download(), npMat))
+
+    def test_cuda_imgproc_cvtColor(self):
+        npMat = (np.random.random((200, 200, 3)) * 255).astype(np.uint8)
+        gpuMat = cv.cuda_GpuMat()
+        gpuMat.upload(npMat)
+        gpuMat2 = cv.cuda.cvtColor(gpuMat, cv.COLOR_BGR2HSV)
+
+        self.assertTrue(np.allclose(gpuMat2.download(), cv.cvtColor(npMat, cv.COLOR_BGR2HSV)))
+
+    def test_cuda_filter_laplacian(self):
+        npMat = (np.random.random((200, 200)) * 255).astype(np.uint16)
+        gpuMat = cv.cuda_GpuMat()
+        gpuMat.upload(npMat)
+        gpuMat = cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3).apply(gpuMat)
+
+        self.assertTrue(np.allclose(gpuMat.download(), cv.Laplacian(npMat, cv.CV_16UC1, ksize=3)))
+
+
+if __name__ == '__main__':
+    NewOpenCVTests.bootstrap()