Merge pull request #12234 from cv3d:python/cuda/wrapping_functionalities

This commit is contained in:
Alexander Alekhin 2018-08-30 20:23:37 +00:00
commit 90f47eb952
17 changed files with 818 additions and 625 deletions

View File

@ -105,7 +105,7 @@ streams.
class CV_EXPORTS_W GpuMat
{
public:
class CV_EXPORTS Allocator
class CV_EXPORTS_W Allocator
{
public:
virtual ~Allocator() {}
@ -116,8 +116,8 @@ public:
};
//! default allocator
static Allocator* defaultAllocator();
static void setDefaultAllocator(Allocator* allocator);
CV_WRAP static GpuMat::Allocator* defaultAllocator();
CV_WRAP static void setDefaultAllocator(GpuMat::Allocator* allocator);
//! default constructor
CV_WRAP explicit GpuMat(GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
@ -131,15 +131,15 @@ public:
CV_WRAP GpuMat(Size size, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
//! copy constructor
GpuMat(const GpuMat& m);
CV_WRAP GpuMat(const GpuMat& m);
//! constructor for GpuMat headers pointing to user-allocated data
GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);
CV_WRAP GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
CV_WRAP GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);
//! creates a GpuMat header for a part of the bigger matrix
GpuMat(const GpuMat& m, Range rowRange, Range colRange);
GpuMat(const GpuMat& m, Rect roi);
CV_WRAP GpuMat(const GpuMat& m, Range rowRange, Range colRange);
CV_WRAP GpuMat(const GpuMat& m, Rect roi);
//! builds GpuMat from host memory (Blocking call)
CV_WRAP explicit GpuMat(InputArray arr, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
@ -158,7 +158,7 @@ public:
void release();
//! swaps with other smart pointer
void swap(GpuMat& mat);
CV_WRAP void swap(GpuMat& mat);
/** @brief Performs data upload to GpuMat (Blocking call)
@ -195,7 +195,7 @@ public:
CV_WRAP void download(OutputArray dst, Stream& stream) const;
//! returns deep copy of the GpuMat, i.e. the data is copied
GpuMat clone() const;
CV_WRAP GpuMat clone() const;
//! copies the GpuMat content to device memory (Blocking call)
CV_WRAP void copyTo(OutputArray dst) const;
@ -210,16 +210,16 @@ public:
CV_WRAP void copyTo(OutputArray dst, InputArray mask, Stream& stream) const;
//! sets some of the GpuMat elements to s (Blocking call)
GpuMat& setTo(Scalar s);
CV_WRAP GpuMat& setTo(Scalar s);
//! sets some of the GpuMat elements to s (Non-Blocking call)
GpuMat& setTo(Scalar s, Stream& stream);
CV_WRAP GpuMat& setTo(Scalar s, Stream& stream);
//! sets some of the GpuMat elements to s, according to the mask (Blocking call)
GpuMat& setTo(Scalar s, InputArray mask);
CV_WRAP GpuMat& setTo(Scalar s, InputArray mask);
//! sets some of the GpuMat elements to s, according to the mask (Non-Blocking call)
GpuMat& setTo(Scalar s, InputArray mask, Stream& stream);
CV_WRAP GpuMat& setTo(Scalar s, InputArray mask, Stream& stream);
//! converts GpuMat to another datatype (Blocking call)
CV_WRAP void convertTo(OutputArray dst, int rtype) const;
@ -236,7 +236,7 @@ public:
//! converts GpuMat to another datatype with scaling (Non-Blocking call)
CV_WRAP void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream& stream) const;
void assignTo(GpuMat& m, int type=-1) const;
CV_WRAP void assignTo(GpuMat& m, int type = -1) const;
//! returns pointer to y-th row
uchar* ptr(int y = 0);
@ -250,18 +250,18 @@ public:
template <typename _Tp> operator PtrStep<_Tp>() const;
//! returns a new GpuMat header for the specified row
GpuMat row(int y) const;
CV_WRAP GpuMat row(int y) const;
//! returns a new GpuMat header for the specified column
GpuMat col(int x) const;
CV_WRAP GpuMat col(int x) const;
//! ... for the specified row span
GpuMat rowRange(int startrow, int endrow) const;
GpuMat rowRange(Range r) const;
CV_WRAP GpuMat rowRange(int startrow, int endrow) const;
CV_WRAP GpuMat rowRange(Range r) const;
//! ... for the specified column span
GpuMat colRange(int startcol, int endcol) const;
GpuMat colRange(Range r) const;
CV_WRAP GpuMat colRange(int startcol, int endcol) const;
CV_WRAP GpuMat colRange(Range r) const;
//! extracts a rectangular sub-GpuMat (this is a generalized form of row, rowRange etc.)
GpuMat operator ()(Range rowRange, Range colRange) const;
@ -269,44 +269,44 @@ public:
//! creates alternative GpuMat header for the same data, with different
//! number of channels and/or different number of rows
GpuMat reshape(int cn, int rows = 0) const;
CV_WRAP GpuMat reshape(int cn, int rows = 0) const;
//! locates GpuMat header within a parent GpuMat
void locateROI(Size& wholeSize, Point& ofs) const;
CV_WRAP void locateROI(Size& wholeSize, Point& ofs) const;
//! moves/resizes the current GpuMat ROI inside the parent GpuMat
GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright);
CV_WRAP GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright);
//! returns true iff the GpuMat data is continuous
//! (i.e. when there are no gaps between successive rows)
bool isContinuous() const;
CV_WRAP bool isContinuous() const;
//! returns element size in bytes
size_t elemSize() const;
CV_WRAP size_t elemSize() const;
//! returns the size of element channel in bytes
size_t elemSize1() const;
CV_WRAP size_t elemSize1() const;
//! returns element type
int type() const;
CV_WRAP int type() const;
//! returns element type
int depth() const;
CV_WRAP int depth() const;
//! returns number of channels
int channels() const;
CV_WRAP int channels() const;
//! returns step/elemSize1()
size_t step1() const;
CV_WRAP size_t step1() const;
//! returns GpuMat size : width == number of columns, height == number of rows
Size size() const;
CV_WRAP Size size() const;
//! returns true if GpuMat data is NULL
bool empty() const;
CV_WRAP bool empty() const;
//! internal use method: updates the continuity flag
void updateContinuityFlag();
CV_WRAP void updateContinuityFlag();
/*! includes several bit-fields:
- the magic signature
@ -320,7 +320,7 @@ public:
int rows, cols;
//! a distance between successive rows in bytes; includes the gap if any
size_t step;
CV_PROP size_t step;
//! pointer to the data
uchar* data;
@ -348,7 +348,7 @@ public:
Matrix is called continuous if its elements are stored continuously, that is, without gaps at the
end of each row.
*/
CV_EXPORTS void createContinuous(int rows, int cols, int type, OutputArray arr);
CV_EXPORTS_W void createContinuous(int rows, int cols, int type, OutputArray arr);
/** @brief Ensures that the size of a matrix is big enough and the matrix has a proper type.
@ -359,7 +359,7 @@ CV_EXPORTS void createContinuous(int rows, int cols, int type, OutputArray arr);
The function does not reallocate memory if the matrix has proper attributes already.
*/
CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr);
CV_EXPORTS_W void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr);
/** @brief BufferPool for use with CUDA streams
@ -478,7 +478,7 @@ and the corresponding memory is automatically returned to the pool for later usage.
}
@endcode
*/
class CV_EXPORTS BufferPool
class CV_EXPORTS_W BufferPool
{
public:
@ -486,21 +486,21 @@ public:
explicit BufferPool(Stream& stream);
//! Allocates a new GpuMat of given size and type.
GpuMat getBuffer(int rows, int cols, int type);
CV_WRAP GpuMat getBuffer(int rows, int cols, int type);
//! Allocates a new GpuMat of given size and type.
GpuMat getBuffer(Size size, int type) { return getBuffer(size.height, size.width, type); }
CV_WRAP GpuMat getBuffer(Size size, int type) { return getBuffer(size.height, size.width, type); }
//! Returns the allocator associated with the stream.
Ptr<GpuMat::Allocator> getAllocator() const { return allocator_; }
CV_WRAP Ptr<GpuMat::Allocator> getAllocator() const { return allocator_; }
private:
Ptr<GpuMat::Allocator> allocator_;
};
//! BufferPool management (must be called before Stream creation)
CV_EXPORTS void setBufferPoolUsage(bool on);
CV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);
CV_EXPORTS_W void setBufferPoolUsage(bool on);
CV_EXPORTS_W void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);
//===================================================================================
// HostMem
@ -521,46 +521,46 @@ Its interface is also Mat-like but with additional memory type parameters.
@note Allocation size of such memory types is usually limited. For more details, see *CUDA 2.2
Pinned Memory APIs* document or *CUDA C Programming Guide*.
*/
class CV_EXPORTS HostMem
class CV_EXPORTS_W HostMem
{
public:
enum AllocType { PAGE_LOCKED = 1, SHARED = 2, WRITE_COMBINED = 4 };
static MatAllocator* getAllocator(AllocType alloc_type = PAGE_LOCKED);
static MatAllocator* getAllocator(HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
explicit HostMem(AllocType alloc_type = PAGE_LOCKED);
CV_WRAP explicit HostMem(HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
HostMem(const HostMem& m);
HostMem(int rows, int cols, int type, AllocType alloc_type = PAGE_LOCKED);
HostMem(Size size, int type, AllocType alloc_type = PAGE_LOCKED);
CV_WRAP HostMem(int rows, int cols, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
CV_WRAP HostMem(Size size, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
//! creates from host memory with coping data
explicit HostMem(InputArray arr, AllocType alloc_type = PAGE_LOCKED);
CV_WRAP explicit HostMem(InputArray arr, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
~HostMem();
HostMem& operator =(const HostMem& m);
//! swaps with other smart pointer
void swap(HostMem& b);
CV_WRAP void swap(HostMem& b);
//! returns deep copy of the matrix, i.e. the data is copied
HostMem clone() const;
CV_WRAP HostMem clone() const;
//! allocates new matrix data unless the matrix already has specified size and type.
void create(int rows, int cols, int type);
CV_WRAP void create(int rows, int cols, int type);
void create(Size size, int type);
//! creates alternative HostMem header for the same data, with different
//! number of channels and/or different number of rows
HostMem reshape(int cn, int rows = 0) const;
CV_WRAP HostMem reshape(int cn, int rows = 0) const;
//! decrements reference counter and released memory if needed.
void release();
//! returns matrix header with disabled reference counting for HostMem data.
Mat createMatHeader() const;
CV_WRAP Mat createMatHeader() const;
/** @brief Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting
for it.
@ -572,20 +572,20 @@ public:
GpuMat createGpuMatHeader() const;
// Please see cv::Mat for descriptions
bool isContinuous() const;
size_t elemSize() const;
size_t elemSize1() const;
int type() const;
int depth() const;
int channels() const;
size_t step1() const;
Size size() const;
bool empty() const;
CV_WRAP bool isContinuous() const;
CV_WRAP size_t elemSize() const;
CV_WRAP size_t elemSize1() const;
CV_WRAP int type() const;
CV_WRAP int depth() const;
CV_WRAP int channels() const;
CV_WRAP size_t step1() const;
CV_WRAP Size size() const;
CV_WRAP bool empty() const;
// Please see cv::Mat for descriptions
int flags;
int rows, cols;
size_t step;
CV_PROP size_t step;
uchar* data;
int* refcount;
@ -600,13 +600,13 @@ public:
@param m Input matrix.
*/
CV_EXPORTS void registerPageLocked(Mat& m);
CV_EXPORTS_W void registerPageLocked(Mat& m);
/** @brief Unmaps the memory of matrix and makes it pageable again.
@param m Input matrix.
*/
CV_EXPORTS void unregisterPageLocked(Mat& m);
CV_EXPORTS_W void unregisterPageLocked(Mat& m);
//===================================================================================
// Stream
@ -639,7 +639,7 @@ void thread2()
@note By default all CUDA routines are launched in Stream::Null() object, if the stream is not specified by user.
In multi-threading environment the stream objects must be passed explicitly (see previous note).
*/
class CV_EXPORTS Stream
class CV_EXPORTS_W Stream
{
typedef void (Stream::*bool_type)() const;
void this_type_does_not_support_comparisons() const {}
@ -648,22 +648,22 @@ public:
typedef void (*StreamCallback)(int status, void* userData);
//! creates a new asynchronous stream
Stream();
CV_WRAP Stream();
//! creates a new asynchronous stream with custom allocator
Stream(const Ptr<GpuMat::Allocator>& allocator);
CV_WRAP Stream(const Ptr<GpuMat::Allocator>& allocator);
/** @brief Returns true if the current stream queue is finished. Otherwise, it returns false.
*/
bool queryIfComplete() const;
CV_WRAP bool queryIfComplete() const;
/** @brief Blocks the current CPU thread until all operations in the stream are complete.
*/
void waitForCompletion();
CV_WRAP void waitForCompletion();
/** @brief Makes a compute stream wait on an event.
*/
void waitEvent(const Event& event);
CV_WRAP void waitEvent(const Event& event);
/** @brief Adds a callback to be called on the host after all currently enqueued items in the stream have
completed.
@ -676,7 +676,7 @@ public:
void enqueueHostCallback(StreamCallback callback, void* userData);
//! return Stream object for default CUDA stream
static Stream& Null();
CV_WRAP static Stream& Null();
//! returns true if stream object is not default (!= 0)
operator bool_type() const;
@ -692,7 +692,7 @@ private:
friend class DefaultDeviceInitializer;
};
class CV_EXPORTS Event
class CV_EXPORTS_W Event
{
public:
enum CreateFlags
@ -703,19 +703,19 @@ public:
INTERPROCESS = 0x04 /**< Event is suitable for interprocess use. DisableTiming must be set */
};
explicit Event(CreateFlags flags = DEFAULT);
CV_WRAP explicit Event(Event::CreateFlags flags = Event::CreateFlags::DEFAULT);
//! records an event
void record(Stream& stream = Stream::Null());
CV_WRAP void record(Stream& stream = Stream::Null());
//! queries an event's status
bool queryIfComplete() const;
CV_WRAP bool queryIfComplete() const;
//! waits for an event to complete
void waitForCompletion();
CV_WRAP void waitForCompletion();
//! computes the elapsed time between events
static float elapsedTime(const Event& start, const Event& end);
CV_WRAP static float elapsedTime(const Event& start, const Event& end);
class Impl;
@ -793,7 +793,7 @@ built for.
According to the CUDA C Programming Guide Version 3.2: "PTX code produced for some specific compute
capability can always be compiled to binary code of greater or equal compute capability".
*/
class CV_EXPORTS TargetArchs
class CV_EXPORTS_W TargetArchs
{
public:
/** @brief The following method checks whether the module was built with the support of the given feature:
@ -808,23 +808,23 @@ public:
@param major Major compute capability version.
@param minor Minor compute capability version.
*/
static bool has(int major, int minor);
static bool hasPtx(int major, int minor);
static bool hasBin(int major, int minor);
CV_WRAP static bool has(int major, int minor);
CV_WRAP static bool hasPtx(int major, int minor);
CV_WRAP static bool hasBin(int major, int minor);
static bool hasEqualOrLessPtx(int major, int minor);
static bool hasEqualOrGreater(int major, int minor);
static bool hasEqualOrGreaterPtx(int major, int minor);
static bool hasEqualOrGreaterBin(int major, int minor);
CV_WRAP static bool hasEqualOrLessPtx(int major, int minor);
CV_WRAP static bool hasEqualOrGreater(int major, int minor);
CV_WRAP static bool hasEqualOrGreaterPtx(int major, int minor);
CV_WRAP static bool hasEqualOrGreaterBin(int major, int minor);
};
/** @brief Class providing functionality for querying the specified GPU properties.
*/
class CV_EXPORTS DeviceInfo
class CV_EXPORTS_W DeviceInfo
{
public:
//! creates DeviceInfo object for the current GPU
DeviceInfo();
CV_WRAP DeviceInfo();
/** @brief The constructors.
@ -833,68 +833,68 @@ public:
Constructs the DeviceInfo object for the specified device. If device_id parameter is missed, it
constructs an object for the current device.
*/
DeviceInfo(int device_id);
CV_WRAP DeviceInfo(int device_id);
/** @brief Returns system index of the CUDA device starting with 0.
*/
int deviceID() const;
CV_WRAP int deviceID() const;
//! ASCII string identifying device
const char* name() const;
//! global memory available on device in bytes
size_t totalGlobalMem() const;
CV_WRAP size_t totalGlobalMem() const;
//! shared memory available per block in bytes
size_t sharedMemPerBlock() const;
CV_WRAP size_t sharedMemPerBlock() const;
//! 32-bit registers available per block
int regsPerBlock() const;
CV_WRAP int regsPerBlock() const;
//! warp size in threads
int warpSize() const;
CV_WRAP int warpSize() const;
//! maximum pitch in bytes allowed by memory copies
size_t memPitch() const;
CV_WRAP size_t memPitch() const;
//! maximum number of threads per block
int maxThreadsPerBlock() const;
CV_WRAP int maxThreadsPerBlock() const;
//! maximum size of each dimension of a block
Vec3i maxThreadsDim() const;
CV_WRAP Vec3i maxThreadsDim() const;
//! maximum size of each dimension of a grid
Vec3i maxGridSize() const;
CV_WRAP Vec3i maxGridSize() const;
//! clock frequency in kilohertz
int clockRate() const;
CV_WRAP int clockRate() const;
//! constant memory available on device in bytes
size_t totalConstMem() const;
CV_WRAP size_t totalConstMem() const;
//! major compute capability
int majorVersion() const;
CV_WRAP int majorVersion() const;
//! minor compute capability
int minorVersion() const;
CV_WRAP int minorVersion() const;
//! alignment requirement for textures
size_t textureAlignment() const;
CV_WRAP size_t textureAlignment() const;
//! pitch alignment requirement for texture references bound to pitched memory
size_t texturePitchAlignment() const;
CV_WRAP size_t texturePitchAlignment() const;
//! number of multiprocessors on device
int multiProcessorCount() const;
CV_WRAP int multiProcessorCount() const;
//! specified whether there is a run time limit on kernels
bool kernelExecTimeoutEnabled() const;
CV_WRAP bool kernelExecTimeoutEnabled() const;
//! device is integrated as opposed to discrete
bool integrated() const;
CV_WRAP bool integrated() const;
//! device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer
bool canMapHostMemory() const;
CV_WRAP bool canMapHostMemory() const;
enum ComputeMode
{
@ -905,108 +905,108 @@ public:
};
//! compute mode
ComputeMode computeMode() const;
CV_WRAP DeviceInfo::ComputeMode computeMode() const;
//! maximum 1D texture size
int maxTexture1D() const;
CV_WRAP int maxTexture1D() const;
//! maximum 1D mipmapped texture size
int maxTexture1DMipmap() const;
CV_WRAP int maxTexture1DMipmap() const;
//! maximum size for 1D textures bound to linear memory
int maxTexture1DLinear() const;
CV_WRAP int maxTexture1DLinear() const;
//! maximum 2D texture dimensions
Vec2i maxTexture2D() const;
CV_WRAP Vec2i maxTexture2D() const;
//! maximum 2D mipmapped texture dimensions
Vec2i maxTexture2DMipmap() const;
CV_WRAP Vec2i maxTexture2DMipmap() const;
//! maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory
Vec3i maxTexture2DLinear() const;
CV_WRAP Vec3i maxTexture2DLinear() const;
//! maximum 2D texture dimensions if texture gather operations have to be performed
Vec2i maxTexture2DGather() const;
CV_WRAP Vec2i maxTexture2DGather() const;
//! maximum 3D texture dimensions
Vec3i maxTexture3D() const;
CV_WRAP Vec3i maxTexture3D() const;
//! maximum Cubemap texture dimensions
int maxTextureCubemap() const;
CV_WRAP int maxTextureCubemap() const;
//! maximum 1D layered texture dimensions
Vec2i maxTexture1DLayered() const;
CV_WRAP Vec2i maxTexture1DLayered() const;
//! maximum 2D layered texture dimensions
Vec3i maxTexture2DLayered() const;
CV_WRAP Vec3i maxTexture2DLayered() const;
//! maximum Cubemap layered texture dimensions
Vec2i maxTextureCubemapLayered() const;
CV_WRAP Vec2i maxTextureCubemapLayered() const;
//! maximum 1D surface size
int maxSurface1D() const;
CV_WRAP int maxSurface1D() const;
//! maximum 2D surface dimensions
Vec2i maxSurface2D() const;
CV_WRAP Vec2i maxSurface2D() const;
//! maximum 3D surface dimensions
Vec3i maxSurface3D() const;
CV_WRAP Vec3i maxSurface3D() const;
//! maximum 1D layered surface dimensions
Vec2i maxSurface1DLayered() const;
CV_WRAP Vec2i maxSurface1DLayered() const;
//! maximum 2D layered surface dimensions
Vec3i maxSurface2DLayered() const;
CV_WRAP Vec3i maxSurface2DLayered() const;
//! maximum Cubemap surface dimensions
int maxSurfaceCubemap() const;
CV_WRAP int maxSurfaceCubemap() const;
//! maximum Cubemap layered surface dimensions
Vec2i maxSurfaceCubemapLayered() const;
CV_WRAP Vec2i maxSurfaceCubemapLayered() const;
//! alignment requirements for surfaces
size_t surfaceAlignment() const;
CV_WRAP size_t surfaceAlignment() const;
//! device can possibly execute multiple kernels concurrently
bool concurrentKernels() const;
CV_WRAP bool concurrentKernels() const;
//! device has ECC support enabled
bool ECCEnabled() const;
CV_WRAP bool ECCEnabled() const;
//! PCI bus ID of the device
int pciBusID() const;
CV_WRAP int pciBusID() const;
//! PCI device ID of the device
int pciDeviceID() const;
CV_WRAP int pciDeviceID() const;
//! PCI domain ID of the device
int pciDomainID() const;
CV_WRAP int pciDomainID() const;
//! true if device is a Tesla device using TCC driver, false otherwise
bool tccDriver() const;
CV_WRAP bool tccDriver() const;
//! number of asynchronous engines
int asyncEngineCount() const;
CV_WRAP int asyncEngineCount() const;
//! device shares a unified address space with the host
bool unifiedAddressing() const;
CV_WRAP bool unifiedAddressing() const;
//! peak memory clock frequency in kilohertz
int memoryClockRate() const;
CV_WRAP int memoryClockRate() const;
//! global memory bus width in bits
int memoryBusWidth() const;
CV_WRAP int memoryBusWidth() const;
//! size of L2 cache in bytes
int l2CacheSize() const;
CV_WRAP int l2CacheSize() const;
//! maximum resident threads per multiprocessor
int maxThreadsPerMultiProcessor() const;
CV_WRAP int maxThreadsPerMultiProcessor() const;
//! gets free and total device memory
void queryMemory(size_t& totalMemory, size_t& freeMemory) const;
size_t freeMemory() const;
size_t totalMemory() const;
CV_WRAP void queryMemory(size_t& totalMemory, size_t& freeMemory) const;
CV_WRAP size_t freeMemory() const;
CV_WRAP size_t totalMemory() const;
/** @brief Provides information on CUDA feature support.
@ -1021,7 +1021,7 @@ public:
This function returns true if the CUDA module can be run on the specified device. Otherwise, it
returns false .
*/
bool isCompatible() const;
CV_WRAP bool isCompatible() const;
private:
int device_id_;

View File

@ -4,51 +4,27 @@
typedef std::vector<cuda::GpuMat> vector_GpuMat;
typedef cuda::GpuMat::Allocator GpuMat_Allocator;
typedef cuda::HostMem::AllocType HostMem_AllocType;
typedef cuda::Event::CreateFlags Event_CreateFlags;
template<> bool pyopencv_to(PyObject* o, Ptr<cuda::GpuMat>& m, const char* name);
template<> PyObject* pyopencv_from(const Ptr<cuda::GpuMat>& m);
CV_PY_TO_CLASS(cuda::GpuMat);
CV_PY_TO_CLASS(cuda::Stream);
CV_PY_TO_CLASS(cuda::Event);
CV_PY_TO_CLASS(cuda::HostMem);
template<>
bool pyopencv_to(PyObject* o, cuda::GpuMat& m, const char* name)
{
if (!o || o == Py_None)
return true;
Ptr<cuda::GpuMat> mPtr(new cuda::GpuMat());
CV_PY_TO_CLASS_PTR(cuda::GpuMat);
CV_PY_TO_CLASS_PTR(cuda::GpuMat::Allocator);
if (!pyopencv_to(o, mPtr, name)) return false;
m = *mPtr;
return true;
}
CV_PY_TO_ENUM(cuda::Event::CreateFlags);
CV_PY_TO_ENUM(cuda::HostMem::AllocType);
CV_PY_TO_ENUM(cuda::FeatureSet);
template<>
PyObject* pyopencv_from(const cuda::GpuMat& m)
{
Ptr<cuda::GpuMat> mPtr(new cuda::GpuMat());
CV_PY_FROM_CLASS(cuda::GpuMat);
CV_PY_FROM_CLASS(cuda::Stream);
CV_PY_FROM_CLASS(cuda::HostMem);
*mPtr = m;
return pyopencv_from(mPtr);
}
CV_PY_FROM_CLASS_PTR(cuda::GpuMat::Allocator);
template<>
bool pyopencv_to(PyObject *o, cuda::GpuMat::Allocator* &allocator, const char *name)
{
(void)name;
if (!o || o == Py_None)
return true;
failmsg("Python binding for cv::cuda::GpuMat::Allocator is not implemented yet.");
return false;
}
template<>
bool pyopencv_to(PyObject *o, cuda::Stream &stream, const char *name)
{
(void)name;
if (!o || o == Py_None)
return true;
failmsg("Python binding for cv::cuda::Stream is not implemented yet.");
return false;
}
CV_PY_FROM_ENUM(cuda::DeviceInfo::ComputeMode);
#endif

View File

@ -6,7 +6,7 @@ set(the_description "CUDA-accelerated Operations on Matrices")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)
ocv_add_module(cudaarithm opencv_core OPTIONAL opencv_cudev)
ocv_add_module(cudaarithm opencv_core OPTIONAL opencv_cudev WRAP python)
ocv_module_include_directories()
ocv_glob_module_sources()

View File

@ -83,7 +83,7 @@ destination array to be changed. The mask can be used only with single channel i
@sa add
*/
CV_EXPORTS void add(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), int dtype = -1, Stream& stream = Stream::Null());
CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), int dtype = -1, Stream& stream = Stream::Null());
/** @brief Computes a matrix-matrix or matrix-scalar difference.
@ -98,7 +98,7 @@ destination array to be changed. The mask can be used only with single channel i
@sa subtract
*/
CV_EXPORTS void subtract(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), int dtype = -1, Stream& stream = Stream::Null());
CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), int dtype = -1, Stream& stream = Stream::Null());
/** @brief Computes a matrix-matrix or matrix-scalar per-element product.
@ -112,7 +112,7 @@ The depth is defined by dtype or src1 depth.
@sa multiply
*/
CV_EXPORTS void multiply(InputArray src1, InputArray src2, OutputArray dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
CV_EXPORTS_W void multiply(InputArray src1, InputArray src2, OutputArray dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
/** @brief Computes a matrix-matrix or matrix-scalar division.
@ -128,7 +128,7 @@ This function, in contrast to divide, uses a round-down rounding mode.
@sa divide
*/
CV_EXPORTS void divide(InputArray src1, InputArray src2, OutputArray dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
/** @brief Computes per-element absolute difference of two matrices (or of a matrix and scalar).
@ -139,7 +139,7 @@ CV_EXPORTS void divide(InputArray src1, InputArray src2, OutputArray dst, double
@sa absdiff
*/
CV_EXPORTS void absdiff(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Computes an absolute value of each matrix element.
@ -149,7 +149,7 @@ CV_EXPORTS void absdiff(InputArray src1, InputArray src2, OutputArray dst, Strea
@sa abs
*/
CV_EXPORTS void abs(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void abs(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Computes a square value of each matrix element.
@ -157,7 +157,7 @@ CV_EXPORTS void abs(InputArray src, OutputArray dst, Stream& stream = Stream::Nu
@param dst Destination matrix with the same size and type as src .
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void sqr(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void sqr(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Computes a square root of each matrix element.
@ -167,7 +167,7 @@ CV_EXPORTS void sqr(InputArray src, OutputArray dst, Stream& stream = Stream::Nu
@sa sqrt
*/
CV_EXPORTS void sqrt(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Computes an exponent of each matrix element.
@ -177,7 +177,7 @@ CV_EXPORTS void sqrt(InputArray src, OutputArray dst, Stream& stream = Stream::N
@sa exp
*/
CV_EXPORTS void exp(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void exp(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Computes a natural logarithm of absolute value of each matrix element.
@ -187,7 +187,7 @@ CV_EXPORTS void exp(InputArray src, OutputArray dst, Stream& stream = Stream::Nu
@sa log
*/
CV_EXPORTS void log(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void log(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Raises every matrix element to a power.
@ -202,7 +202,7 @@ The function pow raises every element of the input matrix to power :
@sa pow
*/
CV_EXPORTS void pow(InputArray src, double power, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Compares elements of two matrices (or of a matrix and scalar).
@ -220,7 +220,7 @@ CV_EXPORTS void pow(InputArray src, double power, OutputArray dst, Stream& strea
@sa compare
*/
CV_EXPORTS void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop, Stream& stream = Stream::Null());
CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop, Stream& stream = Stream::Null());
/** @brief Performs a per-element bitwise inversion.
@ -230,7 +230,7 @@ CV_EXPORTS void compare(InputArray src1, InputArray src2, OutputArray dst, int c
destination array to be changed. The mask can be used only with single channel images.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void bitwise_not(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Performs a per-element bitwise disjunction of two matrices (or of matrix and scalar).
@ -241,7 +241,7 @@ CV_EXPORTS void bitwise_not(InputArray src, OutputArray dst, InputArray mask = n
destination array to be changed. The mask can be used only with single channel images.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void bitwise_or(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Performs a per-element bitwise conjunction of two matrices (or of matrix and scalar).
@ -252,7 +252,7 @@ CV_EXPORTS void bitwise_or(InputArray src1, InputArray src2, OutputArray dst, In
destination array to be changed. The mask can be used only with single channel images.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void bitwise_and(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Performs a per-element bitwise exclusive or operation of two matrices (or of matrix and scalar).
@ -263,7 +263,7 @@ CV_EXPORTS void bitwise_and(InputArray src1, InputArray src2, OutputArray dst, I
destination array to be changed. The mask can be used only with single channel images.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void bitwise_xor(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Performs pixel by pixel right shift of an image by a constant value.
@ -293,7 +293,7 @@ CV_EXPORTS void lshift(InputArray src, Scalar_<int> val, OutputArray dst, Stream
@sa min
*/
CV_EXPORTS void min(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Computes the per-element maximum of two matrices (or a matrix and a scalar).
@ -304,7 +304,7 @@ CV_EXPORTS void min(InputArray src1, InputArray src2, OutputArray dst, Stream& s
@sa max
*/
CV_EXPORTS void max(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Computes the weighted sum of two arrays.
@ -327,7 +327,7 @@ channel is processed independently.
@sa addWeighted
*/
CV_EXPORTS void addWeighted(InputArray src1, double alpha, InputArray src2, double beta, double gamma, OutputArray dst,
CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2, double beta, double gamma, OutputArray dst,
int dtype = -1, Stream& stream = Stream::Null());
//! adds scaled array to another one (dst = alpha*src1 + src2)
@ -348,7 +348,7 @@ threshold types are not supported.
@sa threshold
*/
CV_EXPORTS double threshold(InputArray src, OutputArray dst, double thresh, double maxval, int type, Stream& stream = Stream::Null());
CV_EXPORTS_W double threshold(InputArray src, OutputArray dst, double thresh, double maxval, int type, Stream& stream = Stream::Null());
/** @brief Computes magnitudes of complex matrix elements.
@ -358,7 +358,7 @@ CV_EXPORTS double threshold(InputArray src, OutputArray dst, double thresh, doub
@sa magnitude
*/
CV_EXPORTS void magnitude(InputArray xy, OutputArray magnitude, Stream& stream = Stream::Null());
CV_EXPORTS_W void magnitude(InputArray xy, OutputArray magnitude, Stream& stream = Stream::Null());
/** @brief Computes squared magnitudes of complex matrix elements.
@ -366,7 +366,7 @@ CV_EXPORTS void magnitude(InputArray xy, OutputArray magnitude, Stream& stream =
@param magnitude Destination matrix of float magnitude squares ( CV_32FC1 ).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void magnitudeSqr(InputArray xy, OutputArray magnitude, Stream& stream = Stream::Null());
CV_EXPORTS_W void magnitudeSqr(InputArray xy, OutputArray magnitude, Stream& stream = Stream::Null());
/** @overload
computes magnitude of each (x(i), y(i)) vector
@ -376,7 +376,7 @@ CV_EXPORTS void magnitudeSqr(InputArray xy, OutputArray magnitude, Stream& strea
@param magnitude Destination matrix of float magnitudes ( CV_32FC1 ).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void magnitude(InputArray x, InputArray y, OutputArray magnitude, Stream& stream = Stream::Null());
CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude, Stream& stream = Stream::Null());
/** @overload
computes squared magnitude of each (x(i), y(i)) vector
@ -386,7 +386,7 @@ CV_EXPORTS void magnitude(InputArray x, InputArray y, OutputArray magnitude, Str
@param magnitude Destination matrix of float magnitude squares ( CV_32FC1 ).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void magnitudeSqr(InputArray x, InputArray y, OutputArray magnitude, Stream& stream = Stream::Null());
CV_EXPORTS_W void magnitudeSqr(InputArray x, InputArray y, OutputArray magnitude, Stream& stream = Stream::Null());
/** @brief Computes polar angles of complex matrix elements.
@ -398,7 +398,7 @@ CV_EXPORTS void magnitudeSqr(InputArray x, InputArray y, OutputArray magnitude,
@sa phase
*/
CV_EXPORTS void phase(InputArray x, InputArray y, OutputArray angle, bool angleInDegrees = false, Stream& stream = Stream::Null());
CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle, bool angleInDegrees = false, Stream& stream = Stream::Null());
/** @brief Converts Cartesian coordinates into polar.
@ -411,7 +411,7 @@ CV_EXPORTS void phase(InputArray x, InputArray y, OutputArray angle, bool angleI
@sa cartToPolar
*/
CV_EXPORTS void cartToPolar(InputArray x, InputArray y, OutputArray magnitude, OutputArray angle, bool angleInDegrees = false, Stream& stream = Stream::Null());
CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, OutputArray magnitude, OutputArray angle, bool angleInDegrees = false, Stream& stream = Stream::Null());
/** @brief Converts polar coordinates into Cartesian.
@ -422,7 +422,7 @@ CV_EXPORTS void cartToPolar(InputArray x, InputArray y, OutputArray magnitude, O
@param angleInDegrees Flag that indicates angles in degrees.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void polarToCart(InputArray magnitude, InputArray angle, OutputArray x, OutputArray y, bool angleInDegrees = false, Stream& stream = Stream::Null());
CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle, OutputArray x, OutputArray y, bool angleInDegrees = false, Stream& stream = Stream::Null());
//! @} cudaarithm_elem
@ -438,9 +438,9 @@ CV_EXPORTS void polarToCart(InputArray magnitude, InputArray angle, OutputArray
@sa merge
*/
CV_EXPORTS void merge(const GpuMat* src, size_t n, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void merge(const GpuMat* src, size_t n, OutputArray dst, Stream& stream = Stream::Null());
/** @overload */
CV_EXPORTS void merge(const std::vector<GpuMat>& src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void merge(const std::vector<GpuMat>& src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Copies each plane of a multi-channel matrix into an array.
@ -450,9 +450,9 @@ CV_EXPORTS void merge(const std::vector<GpuMat>& src, OutputArray dst, Stream& s
@sa split
*/
CV_EXPORTS void split(InputArray src, GpuMat* dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void split(InputArray src, GpuMat* dst, Stream& stream = Stream::Null());
/** @overload */
CV_EXPORTS void split(InputArray src, std::vector<GpuMat>& dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void split(InputArray src, std::vector<GpuMat>& dst, Stream& stream = Stream::Null());
/** @brief Transposes a matrix.
@ -462,7 +462,7 @@ CV_EXPORTS void split(InputArray src, std::vector<GpuMat>& dst, Stream& stream =
@sa transpose
*/
CV_EXPORTS void transpose(InputArray src1, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void transpose(InputArray src1, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Flips a 2D matrix around vertical, horizontal, or both axes.
@ -477,11 +477,11 @@ CV_32F depth.
@sa flip
*/
CV_EXPORTS void flip(InputArray src, OutputArray dst, int flipCode, Stream& stream = Stream::Null());
CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode, Stream& stream = Stream::Null());
/** @brief Base class for transform using lookup table.
*/
class CV_EXPORTS LookUpTable : public Algorithm
class CV_EXPORTS_W LookUpTable : public Algorithm
{
public:
/** @brief Transforms the source matrix into the destination matrix using the given look-up table:
@ -491,14 +491,14 @@ public:
@param dst Destination matrix.
@param stream Stream for the asynchronous version.
*/
virtual void transform(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void transform(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
};
/** @brief Creates implementation for cuda::LookUpTable .
@param lut Look-up table of 256 elements. It is a continuous CV_8U matrix.
*/
CV_EXPORTS Ptr<LookUpTable> createLookUpTable(InputArray lut);
CV_EXPORTS_W Ptr<LookUpTable> createLookUpTable(InputArray lut);
/** @brief Forms a border around an image.
@ -515,7 +515,7 @@ BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supporte
@param value Border value.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void copyMakeBorder(InputArray src, OutputArray dst, int top, int bottom, int left, int right, int borderType,
CV_EXPORTS_W void copyMakeBorder(InputArray src, OutputArray dst, int top, int bottom, int left, int right, int borderType,
Scalar value = Scalar(), Stream& stream = Stream::Null());
//! @} cudaarithm_core
@ -531,9 +531,9 @@ CV_EXPORTS void copyMakeBorder(InputArray src, OutputArray dst, int top, int bot
@sa norm
*/
CV_EXPORTS double norm(InputArray src1, int normType, InputArray mask = noArray());
CV_EXPORTS_W double norm(InputArray src1, int normType, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcNorm(InputArray src, OutputArray dst, int normType, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void calcNorm(InputArray src, OutputArray dst, int normType, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Returns the difference of two matrices.
@ -543,9 +543,9 @@ CV_EXPORTS void calcNorm(InputArray src, OutputArray dst, int normType, InputArr
@sa norm
*/
CV_EXPORTS double norm(InputArray src1, InputArray src2, int normType=NORM_L2);
CV_EXPORTS_W double norm(InputArray src1, InputArray src2, int normType=NORM_L2);
/** @overload */
CV_EXPORTS void calcNormDiff(InputArray src1, InputArray src2, OutputArray dst, int normType=NORM_L2, Stream& stream = Stream::Null());
CV_EXPORTS_W void calcNormDiff(InputArray src1, InputArray src2, OutputArray dst, int normType=NORM_L2, Stream& stream = Stream::Null());
/** @brief Returns the sum of matrix elements.
@ -554,27 +554,27 @@ CV_EXPORTS void calcNormDiff(InputArray src1, InputArray src2, OutputArray dst,
@sa sum
*/
CV_EXPORTS Scalar sum(InputArray src, InputArray mask = noArray());
CV_EXPORTS_W Scalar sum(InputArray src, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void calcSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Returns the sum of absolute values for matrix elements.
@param src Source image of any depth except for CV_64F .
@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
*/
CV_EXPORTS Scalar absSum(InputArray src, InputArray mask = noArray());
CV_EXPORTS_W Scalar absSum(InputArray src, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcAbsSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void calcAbsSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Returns the squared sum of matrix elements.
@param src Source image of any depth except for CV_64F .
@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
*/
CV_EXPORTS Scalar sqrSum(InputArray src, InputArray mask = noArray());
CV_EXPORTS_W Scalar sqrSum(InputArray src, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcSqrSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void calcSqrSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Finds global minimum and maximum matrix elements and returns their values.
@ -587,9 +587,9 @@ The function does not work with CV_64F images on GPUs with the compute capabilit
@sa minMaxLoc
*/
CV_EXPORTS void minMax(InputArray src, double* minVal, double* maxVal, InputArray mask = noArray());
CV_EXPORTS_W void minMax(InputArray src, double* minVal, double* maxVal, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void findMinMax(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
CV_EXPORTS_W void findMinMax(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Finds global minimum and maximum matrix elements and returns their values with locations.
@ -604,10 +604,10 @@ The function does not work with CV_64F images on GPU with the compute capability
@sa minMaxLoc
*/
CV_EXPORTS void minMaxLoc(InputArray src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
CV_EXPORTS_W void minMaxLoc(InputArray src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
InputArray mask = noArray());
/** @overload */
CV_EXPORTS void findMinMaxLoc(InputArray src, OutputArray minMaxVals, OutputArray loc,
CV_EXPORTS_W void findMinMaxLoc(InputArray src, OutputArray minMaxVals, OutputArray loc,
InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Counts non-zero matrix elements.
@ -618,9 +618,9 @@ The function does not work with CV_64F images on GPUs with the compute capabilit
@sa countNonZero
*/
CV_EXPORTS int countNonZero(InputArray src);
CV_EXPORTS_W int countNonZero(InputArray src);
/** @overload */
CV_EXPORTS void countNonZero(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void countNonZero(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Reduces a matrix to a vector.
@ -648,7 +648,7 @@ modes.
@sa reduce
*/
CV_EXPORTS void reduce(InputArray mtx, OutputArray vec, int dim, int reduceOp, int dtype = -1, Stream& stream = Stream::Null());
CV_EXPORTS_W void reduce(InputArray mtx, OutputArray vec, int dim, int reduceOp, int dtype = -1, Stream& stream = Stream::Null());
/** @brief Computes a mean value and a standard deviation of matrix elements.
@ -658,9 +658,9 @@ CV_EXPORTS void reduce(InputArray mtx, OutputArray vec, int dim, int reduceOp, i
@sa meanStdDev
*/
CV_EXPORTS void meanStdDev(InputArray mtx, Scalar& mean, Scalar& stddev);
CV_EXPORTS_W void meanStdDev(InputArray mtx, Scalar& mean, Scalar& stddev);
/** @overload */
CV_EXPORTS void meanStdDev(InputArray mtx, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void meanStdDev(InputArray mtx, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Computes a standard deviation of integral images.
@ -670,7 +670,7 @@ CV_EXPORTS void meanStdDev(InputArray mtx, OutputArray dst, Stream& stream = Str
@param rect Rectangular window.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void rectStdDev(InputArray src, InputArray sqr, OutputArray dst, Rect rect, Stream& stream = Stream::Null());
CV_EXPORTS_W void rectStdDev(InputArray src, InputArray sqr, OutputArray dst, Rect rect, Stream& stream = Stream::Null());
/** @brief Normalizes the norm or value range of an array.
@ -688,7 +688,7 @@ number of channels as src and the depth =CV_MAT_DEPTH(dtype).
@sa normalize
*/
CV_EXPORTS void normalize(InputArray src, OutputArray dst, double alpha, double beta,
CV_EXPORTS_W void normalize(InputArray src, OutputArray dst, double alpha, double beta,
int norm_type, int dtype, InputArray mask = noArray(),
Stream& stream = Stream::Null());
@ -700,7 +700,7 @@ CV_EXPORTS void normalize(InputArray src, OutputArray dst, double alpha, double
@sa integral
*/
CV_EXPORTS void integral(InputArray src, OutputArray sum, Stream& stream = Stream::Null());
CV_EXPORTS_W void integral(InputArray src, OutputArray sum, Stream& stream = Stream::Null());
/** @brief Computes a squared integral image.
@ -709,7 +709,7 @@ CV_EXPORTS void integral(InputArray src, OutputArray sum, Stream& stream = Strea
CV_64FC1 .
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void sqrIntegral(InputArray src, OutputArray sqsum, Stream& stream = Stream::Null());
CV_EXPORTS_W void sqrIntegral(InputArray src, OutputArray sqsum, Stream& stream = Stream::Null());
//! @} cudaarithm_reduce
@ -741,7 +741,7 @@ The function performs generalized matrix multiplication similar to the gemm func
@sa gemm
*/
CV_EXPORTS void gemm(InputArray src1, InputArray src2, double alpha,
CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,
InputArray src3, double beta, OutputArray dst, int flags = 0, Stream& stream = Stream::Null());
/** @brief Performs a per-element multiplication of two Fourier spectrums.
@ -758,7 +758,7 @@ Only full (not packed) CV_32FC2 complex spectrums in the interleaved format are
@sa mulSpectrums
*/
CV_EXPORTS void mulSpectrums(InputArray src1, InputArray src2, OutputArray dst, int flags, bool conjB=false, Stream& stream = Stream::Null());
CV_EXPORTS_W void mulSpectrums(InputArray src1, InputArray src2, OutputArray dst, int flags, bool conjB=false, Stream& stream = Stream::Null());
/** @brief Performs a per-element multiplication of two Fourier spectrums and scales the result.
@ -775,7 +775,7 @@ Only full (not packed) CV_32FC2 complex spectrums in the interleaved format are
@sa mulSpectrums
*/
CV_EXPORTS void mulAndScaleSpectrums(InputArray src1, InputArray src2, OutputArray dst, int flags, float scale, bool conjB=false, Stream& stream = Stream::Null());
CV_EXPORTS_W void mulAndScaleSpectrums(InputArray src1, InputArray src2, OutputArray dst, int flags, float scale, bool conjB=false, Stream& stream = Stream::Null());
/** @brief Performs a forward or inverse discrete Fourier transform (1D or 2D) of the floating point matrix.
@ -812,11 +812,11 @@ instead of the width.
@sa dft
*/
CV_EXPORTS void dft(InputArray src, OutputArray dst, Size dft_size, int flags=0, Stream& stream = Stream::Null());
CV_EXPORTS_W void dft(InputArray src, OutputArray dst, Size dft_size, int flags=0, Stream& stream = Stream::Null());
/** @brief Base class for DFT operator as a cv::Algorithm. :
*/
class CV_EXPORTS DFT : public Algorithm
class CV_EXPORTS_W DFT : public Algorithm
{
public:
/** @brief Computes an FFT of a given image.
@ -825,7 +825,7 @@ public:
@param result Result image.
@param stream Stream for the asynchronous version.
*/
virtual void compute(InputArray image, OutputArray result, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void compute(InputArray image, OutputArray result, Stream& stream = Stream::Null()) = 0;
};
/** @brief Creates implementation for cuda::DFT.
@ -841,11 +841,11 @@ cases are always forward and inverse, respectively).
- **DFT_REAL_OUTPUT** specifies the output as real. The source matrix is the result of
real-complex transform, so the destination matrix must be real.
*/
CV_EXPORTS Ptr<DFT> createDFT(Size dft_size, int flags);
CV_EXPORTS_W Ptr<DFT> createDFT(Size dft_size, int flags);
/** @brief Base class for convolution (or cross-correlation) operator. :
*/
class CV_EXPORTS Convolution : public Algorithm
class CV_EXPORTS_W Convolution : public Algorithm
{
public:
/** @brief Computes a convolution (or cross-correlation) of two images.
@ -867,7 +867,7 @@ public:
estimation of block size will be used (which is optimized for speed). By varying user_block_size
you can reduce memory requirements at the cost of speed.
*/
CV_EXPORTS Ptr<Convolution> createConvolution(Size user_block_size = Size());
CV_EXPORTS_W Ptr<Convolution> createConvolution(Size user_block_size = Size());
//! @} cudaarithm_arithm

View File

@ -77,27 +77,27 @@ class implements algorithm described in @cite MOG2001 .
- An example on gaussian mixture based background/foreground segmantation can be found at
opencv_source_code/samples/gpu/bgfg_segm.cpp
*/
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractor
class CV_EXPORTS_W BackgroundSubtractorMOG : public cv::BackgroundSubtractor
{
public:
using cv::BackgroundSubtractor::apply;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
using cv::BackgroundSubtractor::getBackgroundImage;
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
virtual int getHistory() const = 0;
virtual void setHistory(int nframes) = 0;
CV_WRAP virtual int getHistory() const = 0;
CV_WRAP virtual void setHistory(int nframes) = 0;
virtual int getNMixtures() const = 0;
virtual void setNMixtures(int nmix) = 0;
CV_WRAP virtual int getNMixtures() const = 0;
CV_WRAP virtual void setNMixtures(int nmix) = 0;
virtual double getBackgroundRatio() const = 0;
virtual void setBackgroundRatio(double backgroundRatio) = 0;
CV_WRAP virtual double getBackgroundRatio() const = 0;
CV_WRAP virtual void setBackgroundRatio(double backgroundRatio) = 0;
virtual double getNoiseSigma() const = 0;
virtual void setNoiseSigma(double noiseSigma) = 0;
CV_WRAP virtual double getNoiseSigma() const = 0;
CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0;
};
/** @brief Creates mixture-of-gaussian background subtractor
@ -108,7 +108,7 @@ public:
@param noiseSigma Noise strength (standard deviation of the brightness or each color channel). 0
means some automatic value.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG>
CV_EXPORTS_W Ptr<cuda::BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history = 200, int nmixtures = 5,
double backgroundRatio = 0.7, double noiseSigma = 0);
@ -123,15 +123,15 @@ class implements algorithm described in @cite Zivkovic2004 .
@sa BackgroundSubtractorMOG2
*/
class CV_EXPORTS BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
class CV_EXPORTS_W BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
{
public:
using cv::BackgroundSubtractorMOG2::apply;
using cv::BackgroundSubtractorMOG2::getBackgroundImage;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
};
/** @brief Creates MOG2 Background Subtractor
@ -143,7 +143,7 @@ affect the background update.
@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
speed a bit, so if you do not need this feature, set the parameter to false.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
CV_EXPORTS_W Ptr<cuda::BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16,
bool detectShadows = true);

View File

@ -6,7 +6,7 @@ set(the_description "CUDA-accelerated Video Encoding/Decoding")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wshadow)
ocv_add_module(cudacodec opencv_core opencv_videoio OPTIONAL opencv_cudev)
ocv_add_module(cudacodec opencv_core opencv_videoio OPTIONAL opencv_cudev WRAP python)
ocv_module_include_directories()
ocv_glob_module_sources()

View File

@ -80,7 +80,7 @@ enum SurfaceFormat
/** @brief Different parameters for CUDA video encoder.
*/
struct CV_EXPORTS EncoderParams
struct CV_EXPORTS_W EncoderParams
{
int P_Interval; //!< NVVE_P_INTERVAL,
int IDR_Period; //!< NVVE_IDR_PERIOD,
@ -125,7 +125,7 @@ struct CV_EXPORTS EncoderParams
/** @brief Callbacks for CUDA video encoder.
*/
class CV_EXPORTS EncoderCallBack
class CV_EXPORTS_W EncoderCallBack
{
public:
enum PicType
@ -152,14 +152,14 @@ public:
@param frameNumber
@param picType Specify frame type (I-Frame, P-Frame or B-Frame).
*/
virtual void onBeginFrame(int frameNumber, PicType picType) = 0;
CV_WRAP virtual void onBeginFrame(int frameNumber, EncoderCallBack::PicType picType) = 0;
/** @brief Callback function signals that the encoding operation on the frame has finished.
@param frameNumber
@param picType Specify frame type (I-Frame, P-Frame or B-Frame).
*/
virtual void onEndFrame(int frameNumber, PicType picType) = 0;
CV_WRAP virtual void onEndFrame(int frameNumber, EncoderCallBack::PicType picType) = 0;
};
/** @brief Video writer interface.
@ -172,7 +172,7 @@ The implementation uses H264 video codec.
- An example on how to use the videoWriter class can be found at
opencv_source_code/samples/gpu/video_writer.cpp
*/
class CV_EXPORTS VideoWriter
class CV_EXPORTS_W VideoWriter
{
public:
virtual ~VideoWriter() {}
@ -185,9 +185,9 @@ public:
The method write the specified image to video file. The image must have the same size and the same
surface format as has been specified when opening the video writer.
*/
virtual void write(InputArray frame, bool lastFrame = false) = 0;
CV_WRAP virtual void write(InputArray frame, bool lastFrame = false) = 0;
virtual EncoderParams getEncoderParams() const = 0;
CV_WRAP virtual EncoderParams getEncoderParams() const = 0;
};
/** @brief Creates video writer.
@ -202,7 +202,7 @@ encoding, frames with other formats will be used as is.
The constructors initialize video writer. FFMPEG is used to write videos. User can implement own
multiplexing with cudacodec::EncoderCallBack .
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
CV_EXPORTS_W Ptr<cudacodec::VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
/** @overload
@param fileName Name of the output video file. Only AVI file format is supported.
@param frameSize Size of the input video frames.
@ -212,7 +212,7 @@ CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frame
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
CV_EXPORTS_W Ptr<cudacodec::VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
/** @overload
@param encoderCallback Callbacks for video encoder. See cudacodec::EncoderCallBack . Use it if you
@ -223,7 +223,7 @@ want to work with raw video stream.
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
CV_EXPORTS_W Ptr<cudacodec::VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
/** @overload
@param encoderCallback Callbacks for video encoder. See cudacodec::EncoderCallBack . Use it if you
want to work with raw video stream.
@ -234,7 +234,7 @@ want to work with raw video stream.
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
CV_EXPORTS_W Ptr<cudacodec::VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
////////////////////////////////// Video Decoding //////////////////////////////////////////
@ -284,7 +284,7 @@ struct FormatInfo
- An example on how to use the videoReader class can be found at
opencv_source_code/samples/gpu/video_reader.cpp
*/
class CV_EXPORTS VideoReader
class CV_EXPORTS_W VideoReader
{
public:
virtual ~VideoReader() {}
@ -294,7 +294,7 @@ public:
If no frames has been grabbed (there are no more frames in video file), the methods return false .
The method throws Exception if error occurs.
*/
virtual bool nextFrame(OutputArray frame) = 0;
CV_WRAP virtual bool nextFrame(OutputArray frame) = 0;
/** @brief Returns information about video file format.
*/
@ -305,7 +305,7 @@ public:
User can implement own demultiplexing by implementing this interface.
*/
class CV_EXPORTS RawVideoSource
class CV_EXPORTS_W RawVideoSource
{
public:
virtual ~RawVideoSource() {}
@ -329,11 +329,11 @@ public:
FFMPEG is used to read videos. User can implement own demultiplexing with cudacodec::RawVideoSource
*/
CV_EXPORTS Ptr<VideoReader> createVideoReader(const String& filename);
CV_EXPORTS_W Ptr<VideoReader> createVideoReader(const String& filename);
/** @overload
@param source RAW video source implemented by user.
*/
CV_EXPORTS Ptr<VideoReader> createVideoReader(const Ptr<RawVideoSource>& source);
CV_EXPORTS_W Ptr<VideoReader> createVideoReader(const Ptr<RawVideoSource>& source);
//! @}

View File

@ -0,0 +1,14 @@
#ifdef HAVE_OPENCV_CUDACODEC
#include "opencv2/cudacodec.hpp"
typedef cudacodec::EncoderCallBack::PicType EncoderCallBack_PicType;
CV_PY_TO_CLASS(cudacodec::EncoderParams);
CV_PY_TO_ENUM(cudacodec::EncoderCallBack::PicType);
CV_PY_TO_ENUM(cudacodec::SurfaceFormat);
CV_PY_FROM_CLASS(cudacodec::EncoderParams);
#endif

View File

@ -72,7 +72,7 @@ namespace cv { namespace cuda {
It has two groups of match methods: for matching descriptors of an image with another image or with
an image set.
*/
class CV_EXPORTS DescriptorMatcher : public cv::Algorithm
class CV_EXPORTS_W DescriptorMatcher : public cv::Algorithm
{
public:
//
@ -89,7 +89,7 @@ public:
preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
BRIEF).
*/
static Ptr<DescriptorMatcher> createBFMatcher(int normType = cv::NORM_L2);
CV_WRAP static Ptr<cuda::DescriptorMatcher> createBFMatcher(int normType = cv::NORM_L2);
//
// Utility
@ -97,7 +97,7 @@ public:
/** @brief Returns true if the descriptor matcher supports masking permissible matches.
*/
virtual bool isMaskSupported() const = 0;
CV_WRAP virtual bool isMaskSupported() const = 0;
//
// Descriptor collection
@ -110,26 +110,26 @@ public:
@param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
train image.
*/
virtual void add(const std::vector<GpuMat>& descriptors) = 0;
CV_WRAP virtual void add(const std::vector<GpuMat>& descriptors) = 0;
/** @brief Returns a constant link to the train descriptor collection.
*/
virtual const std::vector<GpuMat>& getTrainDescriptors() const = 0;
CV_WRAP virtual const std::vector<GpuMat>& getTrainDescriptors() const = 0;
/** @brief Clears the train descriptor collection.
*/
virtual void clear() = 0;
CV_WRAP virtual void clear() = 0;
/** @brief Returns true if there are no train descriptors in the collection.
*/
virtual bool empty() const = 0;
CV_WRAP virtual bool empty() const = 0;
/** @brief Trains a descriptor matcher.
Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
train() is run every time before matching.
*/
virtual void train() = 0;
CV_WRAP virtual void train() = 0;
//
// 1 to 1 match
@ -151,14 +151,14 @@ public:
matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
mask.at\<uchar\>(i,j) is non-zero.
*/
virtual void match(InputArray queryDescriptors, InputArray trainDescriptors,
std::vector<DMatch>& matches,
CV_WRAP virtual void match(InputArray queryDescriptors, InputArray trainDescriptors,
CV_OUT std::vector<DMatch>& matches,
InputArray mask = noArray()) = 0;
/** @overload
*/
virtual void match(InputArray queryDescriptors,
std::vector<DMatch>& matches,
CV_WRAP virtual void match(InputArray queryDescriptors,
CV_OUT std::vector<DMatch>& matches,
const std::vector<GpuMat>& masks = std::vector<GpuMat>()) = 0;
/** @brief Finds the best match for each descriptor from a query set (asynchronous version).
@ -178,14 +178,14 @@ public:
matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
mask.at\<uchar\>(i,j) is non-zero.
*/
virtual void matchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
CV_WRAP virtual void matchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
OutputArray matches,
InputArray mask = noArray(),
Stream& stream = Stream::Null()) = 0;
/** @overload
*/
virtual void matchAsync(InputArray queryDescriptors,
CV_WRAP virtual void matchAsync(InputArray queryDescriptors,
OutputArray matches,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
Stream& stream = Stream::Null()) = 0;
@ -198,8 +198,8 @@ public:
@param gpu_matches Matches, returned from DescriptorMatcher::matchAsync.
@param matches Vector of DMatch objects.
*/
virtual void matchConvert(InputArray gpu_matches,
std::vector<DMatch>& matches) = 0;
CV_WRAP virtual void matchConvert(InputArray gpu_matches,
CV_OUT std::vector<DMatch>& matches) = 0;
//
// knn match
@ -223,16 +223,16 @@ public:
descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match
for the details about query and train descriptors.
*/
virtual void knnMatch(InputArray queryDescriptors, InputArray trainDescriptors,
std::vector<std::vector<DMatch> >& matches,
CV_WRAP virtual void knnMatch(InputArray queryDescriptors, InputArray trainDescriptors,
CV_OUT std::vector<std::vector<DMatch> >& matches,
int k,
InputArray mask = noArray(),
bool compactResult = false) = 0;
/** @overload
*/
virtual void knnMatch(InputArray queryDescriptors,
std::vector<std::vector<DMatch> >& matches,
CV_WRAP virtual void knnMatch(InputArray queryDescriptors,
CV_OUT std::vector<std::vector<DMatch> >& matches,
int k,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
bool compactResult = false) = 0;
@ -254,7 +254,7 @@ public:
descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::matchAsync
for the details about query and train descriptors.
*/
virtual void knnMatchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
CV_WRAP virtual void knnMatchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
OutputArray matches,
int k,
InputArray mask = noArray(),
@ -262,7 +262,7 @@ public:
/** @overload
*/
virtual void knnMatchAsync(InputArray queryDescriptors,
CV_WRAP virtual void knnMatchAsync(InputArray queryDescriptors,
OutputArray matches,
int k,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
@ -279,7 +279,7 @@ public:
false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
the matches vector does not contain matches for fully masked-out query descriptors.
*/
virtual void knnMatchConvert(InputArray gpu_matches,
CV_WRAP virtual void knnMatchConvert(InputArray gpu_matches,
std::vector< std::vector<DMatch> >& matches,
bool compactResult = false) = 0;
@ -306,16 +306,16 @@ public:
query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are
returned in the distance increasing order.
*/
virtual void radiusMatch(InputArray queryDescriptors, InputArray trainDescriptors,
std::vector<std::vector<DMatch> >& matches,
CV_WRAP virtual void radiusMatch(InputArray queryDescriptors, InputArray trainDescriptors,
CV_OUT std::vector<std::vector<DMatch> >& matches,
float maxDistance,
InputArray mask = noArray(),
bool compactResult = false) = 0;
/** @overload
*/
virtual void radiusMatch(InputArray queryDescriptors,
std::vector<std::vector<DMatch> >& matches,
CV_WRAP virtual void radiusMatch(InputArray queryDescriptors,
CV_OUT std::vector<std::vector<DMatch> >& matches,
float maxDistance,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
bool compactResult = false) = 0;
@ -338,7 +338,7 @@ public:
query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are
returned in the distance increasing order.
*/
virtual void radiusMatchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
CV_WRAP virtual void radiusMatchAsync(InputArray queryDescriptors, InputArray trainDescriptors,
OutputArray matches,
float maxDistance,
InputArray mask = noArray(),
@ -346,7 +346,7 @@ public:
/** @overload
*/
virtual void radiusMatchAsync(InputArray queryDescriptors,
CV_WRAP virtual void radiusMatchAsync(InputArray queryDescriptors,
OutputArray matches,
float maxDistance,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
@ -363,7 +363,7 @@ public:
false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
the matches vector does not contain matches for fully masked-out query descriptors.
*/
virtual void radiusMatchConvert(InputArray gpu_matches,
CV_WRAP virtual void radiusMatchConvert(InputArray gpu_matches,
std::vector< std::vector<DMatch> >& matches,
bool compactResult = false) = 0;
};
@ -374,10 +374,10 @@ public:
/** @brief Abstract base class for CUDA asynchronous 2D image feature detectors and descriptor extractors.
*/
class CV_EXPORTS Feature2DAsync : public cv::Feature2D
class CV_EXPORTS_W Feature2DAsync : public cv::Feature2D
{
public:
virtual ~Feature2DAsync();
CV_WRAP virtual ~Feature2DAsync();
/** @brief Detects keypoints in an image.
@ -387,7 +387,7 @@ public:
matrix with non-zero values in the region of interest.
@param stream CUDA stream.
*/
virtual void detectAsync(InputArray image,
CV_WRAP virtual void detectAsync(InputArray image,
OutputArray keypoints,
InputArray mask = noArray(),
Stream& stream = Stream::Null());
@ -399,13 +399,13 @@ public:
@param descriptors Computed descriptors. Row j is the descriptor for j-th keypoint.
@param stream CUDA stream.
*/
virtual void computeAsync(InputArray image,
CV_WRAP virtual void computeAsync(InputArray image,
OutputArray keypoints,
OutputArray descriptors,
Stream& stream = Stream::Null());
/** Detects keypoints and computes the descriptors. */
virtual void detectAndComputeAsync(InputArray image,
CV_WRAP virtual void detectAndComputeAsync(InputArray image,
InputArray mask,
OutputArray keypoints,
OutputArray descriptors,
@ -413,7 +413,7 @@ public:
Stream& stream = Stream::Null());
/** Converts keypoints array from internal representation to standard vector. */
virtual void convert(InputArray gpu_keypoints,
CV_WRAP virtual void convert(InputArray gpu_keypoints,
std::vector<KeyPoint>& keypoints) = 0;
};
@ -423,7 +423,7 @@ public:
/** @brief Wrapping class for feature detection using the FAST method.
*/
class CV_EXPORTS FastFeatureDetector : public Feature2DAsync
class CV_EXPORTS_W FastFeatureDetector : public Feature2DAsync
{
public:
enum
@ -435,14 +435,14 @@ public:
FEATURE_SIZE = 7
};
static Ptr<FastFeatureDetector> create(int threshold=10,
CV_WRAP static Ptr<cuda::FastFeatureDetector> create(int threshold=10,
bool nonmaxSuppression=true,
int type=cv::FastFeatureDetector::TYPE_9_16,
int max_npoints = 5000);
virtual void setThreshold(int threshold) = 0;
CV_WRAP virtual void setThreshold(int threshold) = 0;
virtual void setMaxNumPoints(int max_npoints) = 0;
virtual int getMaxNumPoints() const = 0;
CV_WRAP virtual void setMaxNumPoints(int max_npoints) = 0;
CV_WRAP virtual int getMaxNumPoints() const = 0;
};
//
@ -453,7 +453,7 @@ public:
*
* @sa cv::ORB
*/
class CV_EXPORTS ORB : public Feature2DAsync
class CV_EXPORTS_W ORB : public Feature2DAsync
{
public:
enum
@ -467,7 +467,7 @@ public:
ROWS_COUNT
};
static Ptr<ORB> create(int nfeatures=500,
CV_WRAP static Ptr<cuda::ORB> create(int nfeatures=500,
float scaleFactor=1.2f,
int nlevels=8,
int edgeThreshold=31,
@ -479,8 +479,8 @@ public:
bool blurForDescriptor=false);
//! if true, image will be blurred before descriptors calculation
virtual void setBlurForDescriptor(bool blurForDescriptor) = 0;
virtual bool getBlurForDescriptor() const = 0;
CV_WRAP virtual void setBlurForDescriptor(bool blurForDescriptor) = 0;
CV_WRAP virtual bool getBlurForDescriptor() const = 0;
};
//! @}

View File

@ -145,7 +145,7 @@ E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR channel or
The methods support arbitrary permutations of the original channels, including replication.
*/
CV_EXPORTS_W void swapChannels(InputOutputArray image, const int dstOrder[4], Stream& stream = Stream::Null());
CV_EXPORTS void swapChannels(InputOutputArray image, const int dstOrder[4], Stream& stream = Stream::Null());
/** @brief Routines for correcting image color gamma.
@ -265,7 +265,7 @@ a four-channel image, all channels are processed separately.
*/
CV_EXPORTS_W void histEven(InputArray src, OutputArray hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
/** @overload */
CV_EXPORTS void histEven(InputArray src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
CV_EXPORTS_W void histEven(InputArray src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
/** @brief Calculates a histogram with bins determined by the levels array.
@ -277,7 +277,7 @@ For a four-channel image, all channels are processed separately.
*/
CV_EXPORTS_W void histRange(InputArray src, OutputArray hist, InputArray levels, Stream& stream = Stream::Null());
/** @overload */
CV_EXPORTS void histRange(InputArray src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null());
CV_EXPORTS_W void histRange(InputArray src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null());
//! @} cudaimgproc_hist
@ -338,7 +338,7 @@ CV_EXPORTS_W Ptr<CannyEdgeDetector> createCannyEdgeDetector(double low_thresh, d
/** @brief Base class for lines detector algorithm. :
*/
class CV_EXPORTS HoughLinesDetector : public Algorithm
class CV_EXPORTS_W HoughLinesDetector : public Algorithm
{
public:
/** @brief Finds lines in a binary image using the classical Hough transform.
@ -352,7 +352,7 @@ public:
@sa HoughLines
*/
virtual void detect(InputArray src, OutputArray lines, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void detect(InputArray src, OutputArray lines, Stream& stream = Stream::Null()) = 0;
/** @brief Downloads results from cuda::HoughLinesDetector::detect to host memory.
@ -361,22 +361,22 @@ public:
@param h_votes Optional output array for line's votes.
@param stream Stream for the asynchronous version.
*/
virtual void downloadResults(InputArray d_lines, OutputArray h_lines, OutputArray h_votes = noArray(), Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void downloadResults(InputArray d_lines, OutputArray h_lines, OutputArray h_votes = noArray(), Stream& stream = Stream::Null()) = 0;
virtual void setRho(float rho) = 0;
virtual float getRho() const = 0;
CV_WRAP virtual void setRho(float rho) = 0;
CV_WRAP virtual float getRho() const = 0;
virtual void setTheta(float theta) = 0;
virtual float getTheta() const = 0;
CV_WRAP virtual void setTheta(float theta) = 0;
CV_WRAP virtual float getTheta() const = 0;
virtual void setThreshold(int threshold) = 0;
virtual int getThreshold() const = 0;
CV_WRAP virtual void setThreshold(int threshold) = 0;
CV_WRAP virtual int getThreshold() const = 0;
virtual void setDoSort(bool doSort) = 0;
virtual bool getDoSort() const = 0;
CV_WRAP virtual void setDoSort(bool doSort) = 0;
CV_WRAP virtual bool getDoSort() const = 0;
virtual void setMaxLines(int maxLines) = 0;
virtual int getMaxLines() const = 0;
CV_WRAP virtual void setMaxLines(int maxLines) = 0;
CV_WRAP virtual int getMaxLines() const = 0;
};
/** @brief Creates implementation for cuda::HoughLinesDetector .
@ -388,7 +388,7 @@ votes ( \f$>\texttt{threshold}\f$ ).
@param doSort Performs lines sort by votes.
@param maxLines Maximum number of output lines.
*/
CV_EXPORTS Ptr<HoughLinesDetector> createHoughLinesDetector(float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096);
CV_EXPORTS_W Ptr<HoughLinesDetector> createHoughLinesDetector(float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096);
//////////////////////////////////////
@ -396,7 +396,7 @@ CV_EXPORTS Ptr<HoughLinesDetector> createHoughLinesDetector(float rho, float the
/** @brief Base class for line segments detector algorithm. :
*/
class CV_EXPORTS HoughSegmentDetector : public Algorithm
class CV_EXPORTS_W HoughSegmentDetector : public Algorithm
{
public:
/** @brief Finds line segments in a binary image using the probabilistic Hough transform.
@ -409,22 +409,22 @@ public:
@sa HoughLinesP
*/
virtual void detect(InputArray src, OutputArray lines, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void detect(InputArray src, OutputArray lines, Stream& stream = Stream::Null()) = 0;
virtual void setRho(float rho) = 0;
virtual float getRho() const = 0;
CV_WRAP virtual void setRho(float rho) = 0;
CV_WRAP virtual float getRho() const = 0;
virtual void setTheta(float theta) = 0;
virtual float getTheta() const = 0;
CV_WRAP virtual void setTheta(float theta) = 0;
CV_WRAP virtual float getTheta() const = 0;
virtual void setMinLineLength(int minLineLength) = 0;
virtual int getMinLineLength() const = 0;
CV_WRAP virtual void setMinLineLength(int minLineLength) = 0;
CV_WRAP virtual int getMinLineLength() const = 0;
virtual void setMaxLineGap(int maxLineGap) = 0;
virtual int getMaxLineGap() const = 0;
CV_WRAP virtual void setMaxLineGap(int maxLineGap) = 0;
CV_WRAP virtual int getMaxLineGap() const = 0;
virtual void setMaxLines(int maxLines) = 0;
virtual int getMaxLines() const = 0;
CV_WRAP virtual void setMaxLines(int maxLines) = 0;
CV_WRAP virtual int getMaxLines() const = 0;
};
/** @brief Creates implementation for cuda::HoughSegmentDetector .
@ -435,14 +435,14 @@ public:
@param maxLineGap Maximum allowed gap between points on the same line to link them.
@param maxLines Maximum number of output lines.
*/
CV_EXPORTS Ptr<HoughSegmentDetector> createHoughSegmentDetector(float rho, float theta, int minLineLength, int maxLineGap, int maxLines = 4096);
CV_EXPORTS_W Ptr<HoughSegmentDetector> createHoughSegmentDetector(float rho, float theta, int minLineLength, int maxLineGap, int maxLines = 4096);
//////////////////////////////////////
// HoughCircles
/** @brief Base class for circles detector algorithm. :
*/
class CV_EXPORTS HoughCirclesDetector : public Algorithm
class CV_EXPORTS_W HoughCirclesDetector : public Algorithm
{
public:
/** @brief Finds circles in a grayscale image using the Hough transform.
@ -454,28 +454,28 @@ public:
@sa HoughCircles
*/
virtual void detect(InputArray src, OutputArray circles, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void detect(InputArray src, OutputArray circles, Stream& stream = Stream::Null()) = 0;
virtual void setDp(float dp) = 0;
virtual float getDp() const = 0;
CV_WRAP virtual void setDp(float dp) = 0;
CV_WRAP virtual float getDp() const = 0;
virtual void setMinDist(float minDist) = 0;
virtual float getMinDist() const = 0;
CV_WRAP virtual void setMinDist(float minDist) = 0;
CV_WRAP virtual float getMinDist() const = 0;
virtual void setCannyThreshold(int cannyThreshold) = 0;
virtual int getCannyThreshold() const = 0;
CV_WRAP virtual void setCannyThreshold(int cannyThreshold) = 0;
CV_WRAP virtual int getCannyThreshold() const = 0;
virtual void setVotesThreshold(int votesThreshold) = 0;
virtual int getVotesThreshold() const = 0;
CV_WRAP virtual void setVotesThreshold(int votesThreshold) = 0;
CV_WRAP virtual int getVotesThreshold() const = 0;
virtual void setMinRadius(int minRadius) = 0;
virtual int getMinRadius() const = 0;
CV_WRAP virtual void setMinRadius(int minRadius) = 0;
CV_WRAP virtual int getMinRadius() const = 0;
virtual void setMaxRadius(int maxRadius) = 0;
virtual int getMaxRadius() const = 0;
CV_WRAP virtual void setMaxRadius(int maxRadius) = 0;
CV_WRAP virtual int getMaxRadius() const = 0;
virtual void setMaxCircles(int maxCircles) = 0;
virtual int getMaxCircles() const = 0;
CV_WRAP virtual void setMaxCircles(int maxCircles) = 0;
CV_WRAP virtual int getMaxCircles() const = 0;
};
/** @brief Creates implementation for cuda::HoughCirclesDetector .
@ -494,18 +494,18 @@ smaller it is, the more false circles may be detected.
@param maxRadius Maximum circle radius.
@param maxCircles Maximum number of output circles.
*/
CV_EXPORTS Ptr<HoughCirclesDetector> createHoughCirclesDetector(float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096);
CV_EXPORTS_W Ptr<HoughCirclesDetector> createHoughCirclesDetector(float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096);
//////////////////////////////////////
// GeneralizedHough
/** @brief Creates implementation for generalized hough transform from @cite Ballard1981 .
*/
CV_EXPORTS Ptr<GeneralizedHoughBallard> createGeneralizedHoughBallard();
CV_EXPORTS_W Ptr<GeneralizedHoughBallard> createGeneralizedHoughBallard();
/** @brief Creates implementation for generalized hough transform from @cite Guil1999 .
*/
CV_EXPORTS Ptr<GeneralizedHoughGuil> createGeneralizedHoughGuil();
CV_EXPORTS_W Ptr<GeneralizedHoughGuil> createGeneralizedHoughGuil();
//! @} cudaimgproc_hough
@ -516,7 +516,7 @@ CV_EXPORTS Ptr<GeneralizedHoughGuil> createGeneralizedHoughGuil();
/** @brief Base class for Cornerness Criteria computation. :
*/
class CV_EXPORTS CornernessCriteria : public Algorithm
class CV_EXPORTS_W CornernessCriteria : public Algorithm
{
public:
/** @brief Computes the cornerness criteria at each image pixel.
@ -526,7 +526,7 @@ public:
CV_32FC1 type.
@param stream Stream for the asynchronous version.
*/
virtual void compute(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void compute(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
};
/** @brief Creates implementation for Harris cornerness criteria.
@ -540,7 +540,7 @@ supported for now.
@sa cornerHarris
*/
CV_EXPORTS Ptr<CornernessCriteria> createHarrisCorner(int srcType, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
CV_EXPORTS_W Ptr<CornernessCriteria> createHarrisCorner(int srcType, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
/** @brief Creates implementation for the minimum eigen value of a 2x2 derivative covariation matrix (the
cornerness criteria).
@ -553,13 +553,13 @@ supported for now.
@sa cornerMinEigenVal
*/
CV_EXPORTS Ptr<CornernessCriteria> createMinEigenValCorner(int srcType, int blockSize, int ksize, int borderType = BORDER_REFLECT101);
CV_EXPORTS_W Ptr<CornernessCriteria> createMinEigenValCorner(int srcType, int blockSize, int ksize, int borderType = BORDER_REFLECT101);
////////////////////////// Corners Detection ///////////////////////////
/** @brief Base class for Corners Detector. :
*/
class CV_EXPORTS CornersDetector : public Algorithm
class CV_EXPORTS_W CornersDetector : public Algorithm
{
public:
/** @brief Determines strong corners on an image.
@ -571,7 +571,7 @@ public:
CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
@param stream Stream for the asynchronous version.
*/
virtual void detect(InputArray image, OutputArray corners, InputArray mask = noArray(), Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void detect(InputArray image, OutputArray corners, InputArray mask = noArray(), Stream& stream = Stream::Null()) = 0;
};
/** @brief Creates implementation for cuda::CornersDetector .
@ -592,7 +592,7 @@ pixel neighborhood. See cornerEigenValsAndVecs .
or cornerMinEigenVal.
@param harrisK Free parameter of the Harris detector.
*/
CV_EXPORTS Ptr<CornersDetector> createGoodFeaturesToTrackDetector(int srcType, int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
CV_EXPORTS_W Ptr<CornersDetector> createGoodFeaturesToTrackDetector(int srcType, int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04);
//! @} cudaimgproc_feature
@ -613,7 +613,7 @@ as src .
It maps each point of the source image into another point. As a result, you have a new color and new
position of each point.
*/
CV_EXPORTS void meanShiftFiltering(InputArray src, OutputArray dst, int sp, int sr,
CV_EXPORTS_W void meanShiftFiltering(InputArray src, OutputArray dst, int sp, int sr,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
@ -632,7 +632,7 @@ src size. The type is CV_16SC2 .
@sa cuda::meanShiftFiltering
*/
CV_EXPORTS void meanShiftProc(InputArray src, OutputArray dstr, OutputArray dstsp, int sp, int sr,
CV_EXPORTS_W void meanShiftProc(InputArray src, OutputArray dstr, OutputArray dstsp, int sp, int sr,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
@ -646,7 +646,7 @@ CV_EXPORTS void meanShiftProc(InputArray src, OutputArray dstr, OutputArray dsts
@param criteria Termination criteria. See TermCriteria.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void meanShiftSegmentation(InputArray src, OutputArray dst, int sp, int sr, int minsize,
CV_EXPORTS_W void meanShiftSegmentation(InputArray src, OutputArray dst, int sp, int sr, int minsize,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
@ -654,7 +654,7 @@ CV_EXPORTS void meanShiftSegmentation(InputArray src, OutputArray dst, int sp, i
/** @brief Base class for Template Matching. :
*/
class CV_EXPORTS TemplateMatching : public Algorithm
class CV_EXPORTS_W TemplateMatching : public Algorithm
{
public:
/** @brief Computes a proximity map for a raster template and an image where the template is searched for.
@ -665,7 +665,7 @@ public:
x h*, then result must be *W-w+1 x H-h+1*.
@param stream Stream for the asynchronous version.
*/
virtual void match(InputArray image, InputArray templ, OutputArray result, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void match(InputArray image, InputArray templ, OutputArray result, Stream& stream = Stream::Null()) = 0;
};
/** @brief Creates implementation for cuda::TemplateMatching .
@ -694,7 +694,7 @@ The following methods are supported for the CV_32F images for now:
@sa matchTemplate
*/
CV_EXPORTS Ptr<TemplateMatching> createTemplateMatching(int srcType, int method, Size user_block_size = Size());
CV_EXPORTS_W Ptr<TemplateMatching> createTemplateMatching(int srcType, int method, Size user_block_size = Size());
////////////////////////// Bilateral Filter ///////////////////////////
@ -712,7 +712,7 @@ BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supporte
@sa bilateralFilter
*/
CV_EXPORTS void bilateralFilter(InputArray src, OutputArray dst, int kernel_size, float sigma_color, float sigma_spatial,
CV_EXPORTS_W void bilateralFilter(InputArray src, OutputArray dst, int kernel_size, float sigma_color, float sigma_spatial,
int borderMode = BORDER_DEFAULT, Stream& stream = Stream::Null());
///////////////////////////// Blending ////////////////////////////////
@ -728,7 +728,7 @@ type.
@param result Destination image.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void blendLinear(InputArray img1, InputArray img2, InputArray weights1, InputArray weights2,
CV_EXPORTS_W void blendLinear(InputArray img1, InputArray img2, InputArray weights1, InputArray weights2,
OutputArray result, Stream& stream = Stream::Null());
//! @}

View File

@ -75,7 +75,7 @@ namespace cv { namespace cuda {
- (Python) An example applying the HOG descriptor for people detection can be found at
opencv_source_code/samples/python/peopledetect.py
*/
class CV_EXPORTS HOG : public Algorithm
class CV_EXPORTS_W HOG : public Algorithm
{
public:
enum
@ -92,70 +92,70 @@ public:
@param cell_size Cell size. Only (8, 8) is supported for now.
@param nbins Number of bins. Only 9 bins per cell are supported for now.
*/
static Ptr<HOG> create(Size win_size = Size(64, 128),
CV_WRAP static Ptr<HOG> create(Size win_size = Size(64, 128),
Size block_size = Size(16, 16),
Size block_stride = Size(8, 8),
Size cell_size = Size(8, 8),
int nbins = 9);
//! Gaussian smoothing window parameter.
virtual void setWinSigma(double win_sigma) = 0;
virtual double getWinSigma() const = 0;
CV_WRAP virtual void setWinSigma(double win_sigma) = 0;
CV_WRAP virtual double getWinSigma() const = 0;
//! L2-Hys normalization method shrinkage.
virtual void setL2HysThreshold(double threshold_L2hys) = 0;
virtual double getL2HysThreshold() const = 0;
CV_WRAP virtual void setL2HysThreshold(double threshold_L2hys) = 0;
CV_WRAP virtual double getL2HysThreshold() const = 0;
//! Flag to specify whether the gamma correction preprocessing is required or not.
virtual void setGammaCorrection(bool gamma_correction) = 0;
virtual bool getGammaCorrection() const = 0;
CV_WRAP virtual void setGammaCorrection(bool gamma_correction) = 0;
CV_WRAP virtual bool getGammaCorrection() const = 0;
//! Maximum number of detection window increases.
virtual void setNumLevels(int nlevels) = 0;
virtual int getNumLevels() const = 0;
CV_WRAP virtual void setNumLevels(int nlevels) = 0;
CV_WRAP virtual int getNumLevels() const = 0;
//! Threshold for the distance between features and SVM classifying plane.
//! Usually it is 0 and should be specified in the detector coefficients (as the last free
//! coefficient). But if the free coefficient is omitted (which is allowed), you can specify it
//! manually here.
virtual void setHitThreshold(double hit_threshold) = 0;
virtual double getHitThreshold() const = 0;
CV_WRAP virtual void setHitThreshold(double hit_threshold) = 0;
CV_WRAP virtual double getHitThreshold() const = 0;
//! Window stride. It must be a multiple of block stride.
virtual void setWinStride(Size win_stride) = 0;
virtual Size getWinStride() const = 0;
CV_WRAP virtual void setWinStride(Size win_stride) = 0;
CV_WRAP virtual Size getWinStride() const = 0;
//! Coefficient of the detection window increase.
virtual void setScaleFactor(double scale0) = 0;
virtual double getScaleFactor() const = 0;
CV_WRAP virtual void setScaleFactor(double scale0) = 0;
CV_WRAP virtual double getScaleFactor() const = 0;
//! Coefficient to regulate the similarity threshold. When detected, some
//! objects can be covered by many rectangles. 0 means not to perform grouping.
//! See groupRectangles.
virtual void setGroupThreshold(int group_threshold) = 0;
virtual int getGroupThreshold() const = 0;
CV_WRAP virtual void setGroupThreshold(int group_threshold) = 0;
CV_WRAP virtual int getGroupThreshold() const = 0;
//! Descriptor storage format:
//! - **DESCR_FORMAT_ROW_BY_ROW** - Row-major order.
//! - **DESCR_FORMAT_COL_BY_COL** - Column-major order.
virtual void setDescriptorFormat(int descr_format) = 0;
virtual int getDescriptorFormat() const = 0;
CV_WRAP virtual void setDescriptorFormat(int descr_format) = 0;
CV_WRAP virtual int getDescriptorFormat() const = 0;
/** @brief Returns the number of coefficients required for the classification.
*/
virtual size_t getDescriptorSize() const = 0;
CV_WRAP virtual size_t getDescriptorSize() const = 0;
/** @brief Returns the block histogram size.
*/
virtual size_t getBlockHistogramSize() const = 0;
CV_WRAP virtual size_t getBlockHistogramSize() const = 0;
/** @brief Sets coefficients for the linear SVM classifier.
*/
virtual void setSVMDetector(InputArray detector) = 0;
CV_WRAP virtual void setSVMDetector(InputArray detector) = 0;
/** @brief Returns coefficients of the classifier trained for people detection.
*/
virtual Mat getDefaultPeopleDetector() const = 0;
CV_WRAP virtual Mat getDefaultPeopleDetector() const = 0;
/** @brief Performs object detection without a multi-scale window.
@ -183,7 +183,7 @@ public:
@param descriptors 2D array of descriptors.
@param stream CUDA stream.
*/
virtual void compute(InputArray img,
CV_WRAP virtual void compute(InputArray img,
OutputArray descriptors,
Stream& stream = Stream::Null()) = 0;
};
@ -200,7 +200,7 @@ public:
- A Nvidea API specific cascade classifier example can be found at
opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp
*/
class CV_EXPORTS CascadeClassifier : public Algorithm
class CV_EXPORTS_W CascadeClassifier : public Algorithm
{
public:
/** @brief Loads the classifier from a file. Cascade type is detected automatically by constructor parameter.
@ -209,36 +209,36 @@ public:
(trained by the haar training application) and NVIDIA's nvbin are supported for HAAR and only new
type of OpenCV XML cascade supported for LBP. The working haar models can be found at opencv_folder/data/haarcascades_cuda/
*/
static Ptr<CascadeClassifier> create(const String& filename);
CV_WRAP static Ptr<cuda::CascadeClassifier> create(const String& filename);
/** @overload
*/
static Ptr<CascadeClassifier> create(const FileStorage& file);
static Ptr<cuda::CascadeClassifier> create(const FileStorage& file);
//! Maximum possible object size. Objects larger than that are ignored. Used for
//! second signature and supported only for LBP cascades.
virtual void setMaxObjectSize(Size maxObjectSize) = 0;
virtual Size getMaxObjectSize() const = 0;
CV_WRAP virtual void setMaxObjectSize(Size maxObjectSize) = 0;
CV_WRAP virtual Size getMaxObjectSize() const = 0;
//! Minimum possible object size. Objects smaller than that are ignored.
virtual void setMinObjectSize(Size minSize) = 0;
virtual Size getMinObjectSize() const = 0;
CV_WRAP virtual void setMinObjectSize(Size minSize) = 0;
CV_WRAP virtual Size getMinObjectSize() const = 0;
//! Parameter specifying how much the image size is reduced at each image scale.
virtual void setScaleFactor(double scaleFactor) = 0;
virtual double getScaleFactor() const = 0;
CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0;
CV_WRAP virtual double getScaleFactor() const = 0;
//! Parameter specifying how many neighbors each candidate rectangle should have
//! to retain it.
virtual void setMinNeighbors(int minNeighbors) = 0;
virtual int getMinNeighbors() const = 0;
CV_WRAP virtual void setMinNeighbors(int minNeighbors) = 0;
CV_WRAP virtual int getMinNeighbors() const = 0;
virtual void setFindLargestObject(bool findLargestObject) = 0;
virtual bool getFindLargestObject() = 0;
CV_WRAP virtual void setFindLargestObject(bool findLargestObject) = 0;
CV_WRAP virtual bool getFindLargestObject() = 0;
virtual void setMaxNumObjects(int maxNumObjects) = 0;
virtual int getMaxNumObjects() const = 0;
CV_WRAP virtual void setMaxNumObjects(int maxNumObjects) = 0;
CV_WRAP virtual int getMaxNumObjects() const = 0;
virtual Size getClassifierSize() const = 0;
CV_WRAP virtual Size getClassifierSize() const = 0;
/** @brief Detects objects of different sizes in the input image.
@ -268,7 +268,7 @@ public:
@sa CascadeClassifier::detectMultiScale
*/
virtual void detectMultiScale(InputArray image,
CV_WRAP virtual void detectMultiScale(InputArray image,
OutputArray objects,
Stream& stream = Stream::Null()) = 0;
@ -277,7 +277,7 @@ public:
@param gpu_objects Objects array in internal representation.
@param objects Resulting array.
*/
virtual void convert(OutputArray gpu_objects,
CV_WRAP virtual void convert(OutputArray gpu_objects,
std::vector<Rect>& objects) = 0;
};

View File

@ -67,7 +67,7 @@ namespace cv { namespace cuda {
/** @brief Base interface for dense optical flow algorithms.
*/
class CV_EXPORTS DenseOpticalFlow : public Algorithm
class CV_EXPORTS_W DenseOpticalFlow : public Algorithm
{
public:
/** @brief Calculates a dense optical flow.
@ -77,12 +77,12 @@ public:
@param flow computed flow image that has the same size as I0 and type CV_32FC2.
@param stream Stream for the asynchronous version.
*/
virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream = Stream::Null()) = 0;
};
/** @brief Base interface for sparse optical flow algorithms.
*/
class CV_EXPORTS SparseOpticalFlow : public Algorithm
class CV_EXPORTS_W SparseOpticalFlow : public Algorithm
{
public:
/** @brief Calculates a sparse optical flow.
@ -96,7 +96,7 @@ public:
@param err Optional output vector that contains error response for each point (inverse confidence).
@param stream Stream for the asynchronous version.
*/
virtual void calc(InputArray prevImg, InputArray nextImg,
CV_WRAP virtual void calc(InputArray prevImg, InputArray nextImg,
InputArray prevPts, InputOutputArray nextPts,
OutputArray status,
OutputArray err = cv::noArray(),
@ -109,31 +109,31 @@ public:
/** @brief Class computing the optical flow for two images using Brox et al Optical Flow algorithm (@cite Brox2004).
*/
class CV_EXPORTS BroxOpticalFlow : public DenseOpticalFlow
class CV_EXPORTS_W BroxOpticalFlow : public DenseOpticalFlow
{
public:
virtual double getFlowSmoothness() const = 0;
virtual void setFlowSmoothness(double alpha) = 0;
CV_WRAP virtual double getFlowSmoothness() const = 0;
CV_WRAP virtual void setFlowSmoothness(double alpha) = 0;
virtual double getGradientConstancyImportance() const = 0;
virtual void setGradientConstancyImportance(double gamma) = 0;
CV_WRAP virtual double getGradientConstancyImportance() const = 0;
CV_WRAP virtual void setGradientConstancyImportance(double gamma) = 0;
virtual double getPyramidScaleFactor() const = 0;
virtual void setPyramidScaleFactor(double scale_factor) = 0;
CV_WRAP virtual double getPyramidScaleFactor() const = 0;
CV_WRAP virtual void setPyramidScaleFactor(double scale_factor) = 0;
//! number of lagged non-linearity iterations (inner loop)
virtual int getInnerIterations() const = 0;
virtual void setInnerIterations(int inner_iterations) = 0;
CV_WRAP virtual int getInnerIterations() const = 0;
CV_WRAP virtual void setInnerIterations(int inner_iterations) = 0;
//! number of warping iterations (number of pyramid levels)
virtual int getOuterIterations() const = 0;
virtual void setOuterIterations(int outer_iterations) = 0;
CV_WRAP virtual int getOuterIterations() const = 0;
CV_WRAP virtual void setOuterIterations(int outer_iterations) = 0;
//! number of linear system solver iterations
virtual int getSolverIterations() const = 0;
virtual void setSolverIterations(int solver_iterations) = 0;
CV_WRAP virtual int getSolverIterations() const = 0;
CV_WRAP virtual void setSolverIterations(int solver_iterations) = 0;
static Ptr<BroxOpticalFlow> create(
CV_WRAP static Ptr<BroxOpticalFlow> create(
double alpha = 0.197,
double gamma = 50.0,
double scale_factor = 0.8,
@ -157,22 +157,22 @@ iterative Lucas-Kanade method with pyramids.
- An example of the Lucas Kanade optical flow algorithm can be found at
opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp
*/
class CV_EXPORTS SparsePyrLKOpticalFlow : public SparseOpticalFlow
class CV_EXPORTS_W SparsePyrLKOpticalFlow : public SparseOpticalFlow
{
public:
virtual Size getWinSize() const = 0;
virtual void setWinSize(Size winSize) = 0;
CV_WRAP virtual Size getWinSize() const = 0;
CV_WRAP virtual void setWinSize(Size winSize) = 0;
virtual int getMaxLevel() const = 0;
virtual void setMaxLevel(int maxLevel) = 0;
CV_WRAP virtual int getMaxLevel() const = 0;
CV_WRAP virtual void setMaxLevel(int maxLevel) = 0;
virtual int getNumIters() const = 0;
virtual void setNumIters(int iters) = 0;
CV_WRAP virtual int getNumIters() const = 0;
CV_WRAP virtual void setNumIters(int iters) = 0;
virtual bool getUseInitialFlow() const = 0;
virtual void setUseInitialFlow(bool useInitialFlow) = 0;
CV_WRAP virtual bool getUseInitialFlow() const = 0;
CV_WRAP virtual void setUseInitialFlow(bool useInitialFlow) = 0;
static Ptr<SparsePyrLKOpticalFlow> create(
CV_WRAP static Ptr<cuda::SparsePyrLKOpticalFlow> create(
Size winSize = Size(21, 21),
int maxLevel = 3,
int iters = 30,
@ -184,22 +184,22 @@ public:
The class can calculate an optical flow for a dense optical flow using the
iterative Lucas-Kanade method with pyramids.
*/
class CV_EXPORTS DensePyrLKOpticalFlow : public DenseOpticalFlow
class CV_EXPORTS_W DensePyrLKOpticalFlow : public DenseOpticalFlow
{
public:
virtual Size getWinSize() const = 0;
virtual void setWinSize(Size winSize) = 0;
CV_WRAP virtual Size getWinSize() const = 0;
CV_WRAP virtual void setWinSize(Size winSize) = 0;
virtual int getMaxLevel() const = 0;
virtual void setMaxLevel(int maxLevel) = 0;
CV_WRAP virtual int getMaxLevel() const = 0;
CV_WRAP virtual void setMaxLevel(int maxLevel) = 0;
virtual int getNumIters() const = 0;
virtual void setNumIters(int iters) = 0;
CV_WRAP virtual int getNumIters() const = 0;
CV_WRAP virtual void setNumIters(int iters) = 0;
virtual bool getUseInitialFlow() const = 0;
virtual void setUseInitialFlow(bool useInitialFlow) = 0;
CV_WRAP virtual bool getUseInitialFlow() const = 0;
CV_WRAP virtual void setUseInitialFlow(bool useInitialFlow) = 0;
static Ptr<DensePyrLKOpticalFlow> create(
CV_WRAP static Ptr<DensePyrLKOpticalFlow> create(
Size winSize = Size(13, 13),
int maxLevel = 3,
int iters = 30,
@ -212,34 +212,34 @@ public:
/** @brief Class computing a dense optical flow using the Gunnar Farneback's algorithm.
*/
class CV_EXPORTS FarnebackOpticalFlow : public DenseOpticalFlow
class CV_EXPORTS_W FarnebackOpticalFlow : public DenseOpticalFlow
{
public:
virtual int getNumLevels() const = 0;
virtual void setNumLevels(int numLevels) = 0;
CV_WRAP virtual int getNumLevels() const = 0;
CV_WRAP virtual void setNumLevels(int numLevels) = 0;
virtual double getPyrScale() const = 0;
virtual void setPyrScale(double pyrScale) = 0;
CV_WRAP virtual double getPyrScale() const = 0;
CV_WRAP virtual void setPyrScale(double pyrScale) = 0;
virtual bool getFastPyramids() const = 0;
virtual void setFastPyramids(bool fastPyramids) = 0;
CV_WRAP virtual bool getFastPyramids() const = 0;
CV_WRAP virtual void setFastPyramids(bool fastPyramids) = 0;
virtual int getWinSize() const = 0;
virtual void setWinSize(int winSize) = 0;
CV_WRAP virtual int getWinSize() const = 0;
CV_WRAP virtual void setWinSize(int winSize) = 0;
virtual int getNumIters() const = 0;
virtual void setNumIters(int numIters) = 0;
CV_WRAP virtual int getNumIters() const = 0;
CV_WRAP virtual void setNumIters(int numIters) = 0;
virtual int getPolyN() const = 0;
virtual void setPolyN(int polyN) = 0;
CV_WRAP virtual int getPolyN() const = 0;
CV_WRAP virtual void setPolyN(int polyN) = 0;
virtual double getPolySigma() const = 0;
virtual void setPolySigma(double polySigma) = 0;
CV_WRAP virtual double getPolySigma() const = 0;
CV_WRAP virtual void setPolySigma(double polySigma) = 0;
virtual int getFlags() const = 0;
virtual void setFlags(int flags) = 0;
CV_WRAP virtual int getFlags() const = 0;
CV_WRAP virtual void setFlags(int flags) = 0;
static Ptr<FarnebackOpticalFlow> create(
CV_WRAP static Ptr<cuda::FarnebackOpticalFlow> create(
int numLevels = 5,
double pyrScale = 0.5,
bool fastPyramids = false,
@ -259,14 +259,14 @@ public:
* @sa C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
* @sa Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
*/
class CV_EXPORTS OpticalFlowDual_TVL1 : public DenseOpticalFlow
class CV_EXPORTS_W OpticalFlowDual_TVL1 : public DenseOpticalFlow
{
public:
/**
* Time step of the numerical scheme.
*/
virtual double getTau() const = 0;
virtual void setTau(double tau) = 0;
CV_WRAP virtual double getTau() const = 0;
CV_WRAP virtual void setTau(double tau) = 0;
/**
* Weight parameter for the data term, attachment parameter.
@ -274,8 +274,8 @@ public:
* The smaller this parameter is, the smoother the solutions we obtain.
* It depends on the range of motions of the images, so its value should be adapted to each image sequence.
*/
virtual double getLambda() const = 0;
virtual void setLambda(double lambda) = 0;
CV_WRAP virtual double getLambda() const = 0;
CV_WRAP virtual void setLambda(double lambda) = 0;
/**
* Weight parameter for (u - v)^2, tightness parameter.
@ -283,8 +283,8 @@ public:
* In theory, it should have a small value in order to maintain both parts in correspondence.
* The method is stable for a large range of values of this parameter.
*/
virtual double getGamma() const = 0;
virtual void setGamma(double gamma) = 0;
CV_WRAP virtual double getGamma() const = 0;
CV_WRAP virtual void setGamma(double gamma) = 0;
/**
* parameter used for motion estimation. It adds a variable allowing for illumination variations
@ -292,14 +292,14 @@ public:
* See: Chambolle et al, A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging
* Journal of Mathematical imaging and vision, may 2011 Vol 40 issue 1, pp 120-145
*/
virtual double getTheta() const = 0;
virtual void setTheta(double theta) = 0;
CV_WRAP virtual double getTheta() const = 0;
CV_WRAP virtual void setTheta(double theta) = 0;
/**
* Number of scales used to create the pyramid of images.
*/
virtual int getNumScales() const = 0;
virtual void setNumScales(int nscales) = 0;
CV_WRAP virtual int getNumScales() const = 0;
CV_WRAP virtual void setNumScales(int nscales) = 0;
/**
* Number of warpings per scale.
@ -307,29 +307,29 @@ public:
* This is a parameter that assures the stability of the method.
* It also affects the running time, so it is a compromise between speed and accuracy.
*/
virtual int getNumWarps() const = 0;
virtual void setNumWarps(int warps) = 0;
CV_WRAP virtual int getNumWarps() const = 0;
CV_WRAP virtual void setNumWarps(int warps) = 0;
/**
* Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
* A small value will yield more accurate solutions at the expense of a slower convergence.
*/
virtual double getEpsilon() const = 0;
virtual void setEpsilon(double epsilon) = 0;
CV_WRAP virtual double getEpsilon() const = 0;
CV_WRAP virtual void setEpsilon(double epsilon) = 0;
/**
* Stopping criterion iterations number used in the numerical scheme.
*/
virtual int getNumIterations() const = 0;
virtual void setNumIterations(int iterations) = 0;
CV_WRAP virtual int getNumIterations() const = 0;
CV_WRAP virtual void setNumIterations(int iterations) = 0;
virtual double getScaleStep() const = 0;
virtual void setScaleStep(double scaleStep) = 0;
CV_WRAP virtual double getScaleStep() const = 0;
CV_WRAP virtual void setScaleStep(double scaleStep) = 0;
virtual bool getUseInitialFlow() const = 0;
virtual void setUseInitialFlow(bool useInitialFlow) = 0;
CV_WRAP virtual bool getUseInitialFlow() const = 0;
CV_WRAP virtual void setUseInitialFlow(bool useInitialFlow) = 0;
static Ptr<OpticalFlowDual_TVL1> create(
CV_WRAP static Ptr<OpticalFlowDual_TVL1> create(
double tau = 0.25,
double lambda = 0.15,
double theta = 0.3,

View File

@ -69,12 +69,12 @@ namespace cv { namespace cuda {
@sa StereoBM
*/
class CV_EXPORTS StereoBM : public cv::StereoBM
class CV_EXPORTS_W StereoBM : public cv::StereoBM
{
public:
using cv::StereoBM::compute;
virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
CV_WRAP virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
};
/** @brief Creates StereoBM object.
@ -87,7 +87,7 @@ shifted by changing the minimum disparity.
accurate disparity map. Smaller block size gives more detailed disparity map, but there is higher
chance for algorithm to find a wrong correspondence.
*/
CV_EXPORTS Ptr<cuda::StereoBM> createStereoBM(int numDisparities = 64, int blockSize = 19);
CV_EXPORTS_W Ptr<cuda::StereoBM> createStereoBM(int numDisparities = 64, int blockSize = 19);
/////////////////////////////////////////
// StereoBeliefPropagation
@ -125,13 +125,13 @@ requirement:
@sa StereoMatcher
*/
class CV_EXPORTS StereoBeliefPropagation : public cv::StereoMatcher
class CV_EXPORTS_W StereoBeliefPropagation : public cv::StereoMatcher
{
public:
using cv::StereoMatcher::compute;
/** @overload */
virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
CV_WRAP virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
/** @brief Enables the stereo correspondence operator that finds the disparity for the specified data cost.
@ -142,40 +142,40 @@ public:
fractional bits.
@param stream Stream for the asynchronous version.
*/
virtual void compute(InputArray data, OutputArray disparity, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void compute(InputArray data, OutputArray disparity, Stream& stream = Stream::Null()) = 0;
//! number of BP iterations on each level
virtual int getNumIters() const = 0;
virtual void setNumIters(int iters) = 0;
CV_WRAP virtual int getNumIters() const = 0;
CV_WRAP virtual void setNumIters(int iters) = 0;
//! number of levels
virtual int getNumLevels() const = 0;
virtual void setNumLevels(int levels) = 0;
CV_WRAP virtual int getNumLevels() const = 0;
CV_WRAP virtual void setNumLevels(int levels) = 0;
//! truncation of data cost
virtual double getMaxDataTerm() const = 0;
virtual void setMaxDataTerm(double max_data_term) = 0;
CV_WRAP virtual double getMaxDataTerm() const = 0;
CV_WRAP virtual void setMaxDataTerm(double max_data_term) = 0;
//! data weight
virtual double getDataWeight() const = 0;
virtual void setDataWeight(double data_weight) = 0;
CV_WRAP virtual double getDataWeight() const = 0;
CV_WRAP virtual void setDataWeight(double data_weight) = 0;
//! truncation of discontinuity cost
virtual double getMaxDiscTerm() const = 0;
virtual void setMaxDiscTerm(double max_disc_term) = 0;
CV_WRAP virtual double getMaxDiscTerm() const = 0;
CV_WRAP virtual void setMaxDiscTerm(double max_disc_term) = 0;
//! discontinuity single jump
virtual double getDiscSingleJump() const = 0;
virtual void setDiscSingleJump(double disc_single_jump) = 0;
CV_WRAP virtual double getDiscSingleJump() const = 0;
CV_WRAP virtual void setDiscSingleJump(double disc_single_jump) = 0;
//! type for messages (CV_16SC1 or CV_32FC1)
virtual int getMsgType() const = 0;
virtual void setMsgType(int msg_type) = 0;
CV_WRAP virtual int getMsgType() const = 0;
CV_WRAP virtual void setMsgType(int msg_type) = 0;
/** @brief Uses a heuristic method to compute the recommended parameters ( ndisp, iters and levels ) for the
specified image size ( width and height ).
*/
static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels);
CV_WRAP static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels);
};
/** @brief Creates StereoBeliefPropagation object.
@ -185,7 +185,7 @@ public:
@param levels Number of levels.
@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
*/
CV_EXPORTS Ptr<cuda::StereoBeliefPropagation>
CV_EXPORTS_W Ptr<cuda::StereoBeliefPropagation>
createStereoBeliefPropagation(int ndisp = 64, int iters = 5, int levels = 5, int msg_type = CV_32F);
/////////////////////////////////////////
@ -214,20 +214,20 @@ requirement:
\f[10 \cdot 2^{levels-1} \cdot max \_ data \_ term < SHRT \_ MAX\f]
*/
class CV_EXPORTS StereoConstantSpaceBP : public cuda::StereoBeliefPropagation
class CV_EXPORTS_W StereoConstantSpaceBP : public cuda::StereoBeliefPropagation
{
public:
//! number of active disparity on the first level
virtual int getNrPlane() const = 0;
virtual void setNrPlane(int nr_plane) = 0;
CV_WRAP virtual int getNrPlane() const = 0;
CV_WRAP virtual void setNrPlane(int nr_plane) = 0;
virtual bool getUseLocalInitDataCost() const = 0;
virtual void setUseLocalInitDataCost(bool use_local_init_data_cost) = 0;
CV_WRAP virtual bool getUseLocalInitDataCost() const = 0;
CV_WRAP virtual void setUseLocalInitDataCost(bool use_local_init_data_cost) = 0;
/** @brief Uses a heuristic method to compute parameters (ndisp, iters, levelsand nrplane) for the specified
image size (widthand height).
*/
static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane);
CV_WRAP static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane);
};
/** @brief Creates StereoConstantSpaceBP object.
@ -238,7 +238,7 @@ public:
@param nr_plane Number of disparity levels on the first level.
@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
*/
CV_EXPORTS Ptr<cuda::StereoConstantSpaceBP>
CV_EXPORTS_W Ptr<cuda::StereoConstantSpaceBP>
createStereoConstantSpaceBP(int ndisp = 128, int iters = 8, int levels = 4, int nr_plane = 4, int msg_type = CV_32F);
/////////////////////////////////////////
@ -248,7 +248,7 @@ CV_EXPORTS Ptr<cuda::StereoConstantSpaceBP>
The class implements @cite Yang2010 algorithm.
*/
class CV_EXPORTS DisparityBilateralFilter : public cv::Algorithm
class CV_EXPORTS_W DisparityBilateralFilter : public cv::Algorithm
{
public:
/** @brief Refines a disparity map using joint bilateral filtering.
@ -258,28 +258,28 @@ public:
@param dst Destination disparity map. It has the same size and type as disparity .
@param stream Stream for the asynchronous version.
*/
virtual void apply(InputArray disparity, InputArray image, OutputArray dst, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void apply(InputArray disparity, InputArray image, OutputArray dst, Stream& stream = Stream::Null()) = 0;
virtual int getNumDisparities() const = 0;
virtual void setNumDisparities(int numDisparities) = 0;
CV_WRAP virtual int getNumDisparities() const = 0;
CV_WRAP virtual void setNumDisparities(int numDisparities) = 0;
virtual int getRadius() const = 0;
virtual void setRadius(int radius) = 0;
CV_WRAP virtual int getRadius() const = 0;
CV_WRAP virtual void setRadius(int radius) = 0;
virtual int getNumIters() const = 0;
virtual void setNumIters(int iters) = 0;
CV_WRAP virtual int getNumIters() const = 0;
CV_WRAP virtual void setNumIters(int iters) = 0;
//! truncation of data continuity
virtual double getEdgeThreshold() const = 0;
virtual void setEdgeThreshold(double edge_threshold) = 0;
CV_WRAP virtual double getEdgeThreshold() const = 0;
CV_WRAP virtual void setEdgeThreshold(double edge_threshold) = 0;
//! truncation of disparity continuity
virtual double getMaxDiscThreshold() const = 0;
virtual void setMaxDiscThreshold(double max_disc_threshold) = 0;
CV_WRAP virtual double getMaxDiscThreshold() const = 0;
CV_WRAP virtual void setMaxDiscThreshold(double max_disc_threshold) = 0;
//! filter range sigma
virtual double getSigmaRange() const = 0;
virtual void setSigmaRange(double sigma_range) = 0;
CV_WRAP virtual double getSigmaRange() const = 0;
CV_WRAP virtual void setSigmaRange(double sigma_range) = 0;
};
/** @brief Creates DisparityBilateralFilter object.
@ -288,7 +288,7 @@ public:
@param radius Filter radius.
@param iters Number of iterations.
*/
CV_EXPORTS Ptr<cuda::DisparityBilateralFilter>
CV_EXPORTS_W Ptr<cuda::DisparityBilateralFilter>
createDisparityBilateralFilter(int ndisp = 64, int radius = 3, int iters = 1);
/////////////////////////////////////////
@ -308,7 +308,7 @@ disparity map.
@sa reprojectImageTo3D
*/
CV_EXPORTS void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray Q, int dst_cn = 4, Stream& stream = Stream::Null());
CV_EXPORTS_W void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray Q, int dst_cn = 4, Stream& stream = Stream::Null());
/** @brief Colors a disparity image.
@ -324,7 +324,7 @@ This function draws a colored disparity map by converting disparity values from
first to HSV color space (where different disparity values correspond to different hues) and then
converting the pixels to RGB for visualization.
*/
CV_EXPORTS void drawColorDisp(InputArray src_disp, OutputArray dst_disp, int ndisp, Stream& stream = Stream::Null());
CV_EXPORTS_W void drawColorDisp(InputArray src_disp, OutputArray dst_disp, int ndisp, Stream& stream = Stream::Null());
//! @}

View File

@ -83,7 +83,7 @@ Values of pixels with non-integer coordinates are computed using the bilinear in
@sa remap
*/
CV_EXPORTS void remap(InputArray src, OutputArray dst, InputArray xmap, InputArray ymap,
CV_EXPORTS_W void remap(InputArray src, OutputArray dst, InputArray xmap, InputArray ymap,
int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(),
Stream& stream = Stream::Null());
@ -105,7 +105,7 @@ supported for now.
@sa resize
*/
CV_EXPORTS void resize(InputArray src, OutputArray dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
CV_EXPORTS_W void resize(InputArray src, OutputArray dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
/** @brief Applies an affine transformation to an image.
@ -123,7 +123,7 @@ INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are support
@sa warpAffine
*/
CV_EXPORTS void warpAffine(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
CV_EXPORTS_W void warpAffine(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null());
/** @brief Builds transformation maps for affine transformation.
@ -137,7 +137,7 @@ CV_EXPORTS void warpAffine(InputArray src, OutputArray dst, InputArray M, Size d
@sa cuda::warpAffine , cuda::remap
*/
CV_EXPORTS void buildWarpAffineMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
CV_EXPORTS_W void buildWarpAffineMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
/** @brief Applies a perspective transformation to an image.
@ -155,7 +155,7 @@ INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are support
@sa warpPerspective
*/
CV_EXPORTS void warpPerspective(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
CV_EXPORTS_W void warpPerspective(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null());
/** @brief Builds transformation maps for perspective transformation.
@ -169,7 +169,7 @@ CV_EXPORTS void warpPerspective(InputArray src, OutputArray dst, InputArray M, S
@sa cuda::warpPerspective , cuda::remap
*/
CV_EXPORTS void buildWarpPerspectiveMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
CV_EXPORTS_W void buildWarpPerspectiveMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
/** @brief Rotates an image around the origin (0,0) and then shifts it.
@ -186,7 +186,7 @@ are supported.
@sa cuda::warpAffine
*/
CV_EXPORTS void rotate(InputArray src, OutputArray dst, Size dsize, double angle, double xShift = 0, double yShift = 0,
CV_EXPORTS_W void rotate(InputArray src, OutputArray dst, Size dsize, double angle, double xShift = 0, double yShift = 0,
int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
/** @brief Smoothes an image and downsamples it.
@ -198,7 +198,7 @@ type as src .
@sa pyrDown
*/
CV_EXPORTS void pyrDown(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void pyrDown(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Upsamples an image and then smoothes it.
@ -207,7 +207,7 @@ CV_EXPORTS void pyrDown(InputArray src, OutputArray dst, Stream& stream = Stream
src .
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void pyrUp(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void pyrUp(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
//! @}

View File

@ -829,55 +829,55 @@ enum MarkerTypes
};
//! finds arbitrary template in the grayscale image using Generalized Hough Transform
class CV_EXPORTS GeneralizedHough : public Algorithm
class CV_EXPORTS_W GeneralizedHough : public Algorithm
{
public:
//! set template to search
virtual void setTemplate(InputArray templ, Point templCenter = Point(-1, -1)) = 0;
virtual void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)) = 0;
CV_WRAP virtual void setTemplate(InputArray templ, Point templCenter = Point(-1, -1)) = 0;
CV_WRAP virtual void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)) = 0;
//! find template on image
virtual void detect(InputArray image, OutputArray positions, OutputArray votes = noArray()) = 0;
virtual void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = noArray()) = 0;
CV_WRAP virtual void detect(InputArray image, OutputArray positions, OutputArray votes = noArray()) = 0;
CV_WRAP virtual void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = noArray()) = 0;
//! Canny low threshold.
virtual void setCannyLowThresh(int cannyLowThresh) = 0;
virtual int getCannyLowThresh() const = 0;
CV_WRAP virtual void setCannyLowThresh(int cannyLowThresh) = 0;
CV_WRAP virtual int getCannyLowThresh() const = 0;
//! Canny high threshold.
virtual void setCannyHighThresh(int cannyHighThresh) = 0;
virtual int getCannyHighThresh() const = 0;
CV_WRAP virtual void setCannyHighThresh(int cannyHighThresh) = 0;
CV_WRAP virtual int getCannyHighThresh() const = 0;
//! Minimum distance between the centers of the detected objects.
virtual void setMinDist(double minDist) = 0;
virtual double getMinDist() const = 0;
CV_WRAP virtual void setMinDist(double minDist) = 0;
CV_WRAP virtual double getMinDist() const = 0;
//! Inverse ratio of the accumulator resolution to the image resolution.
virtual void setDp(double dp) = 0;
virtual double getDp() const = 0;
CV_WRAP virtual void setDp(double dp) = 0;
CV_WRAP virtual double getDp() const = 0;
//! Maximal size of inner buffers.
virtual void setMaxBufferSize(int maxBufferSize) = 0;
virtual int getMaxBufferSize() const = 0;
CV_WRAP virtual void setMaxBufferSize(int maxBufferSize) = 0;
CV_WRAP virtual int getMaxBufferSize() const = 0;
};
//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.
//! Detects position only without translation and rotation
class CV_EXPORTS GeneralizedHoughBallard : public GeneralizedHough
class CV_EXPORTS_W GeneralizedHoughBallard : public GeneralizedHough
{
public:
//! R-Table levels.
virtual void setLevels(int levels) = 0;
virtual int getLevels() const = 0;
CV_WRAP virtual void setLevels(int levels) = 0;
CV_WRAP virtual int getLevels() const = 0;
//! The accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected.
virtual void setVotesThreshold(int votesThreshold) = 0;
virtual int getVotesThreshold() const = 0;
CV_WRAP virtual void setVotesThreshold(int votesThreshold) = 0;
CV_WRAP virtual int getVotesThreshold() const = 0;
};
//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.
//! Detects position, translation and rotation
class CV_EXPORTS GeneralizedHoughGuil : public GeneralizedHough
class CV_EXPORTS_W GeneralizedHoughGuil : public GeneralizedHough
{
public:
//! Angle difference in degrees between two points in feature.

View File

@ -534,13 +534,13 @@ class FuncVariant(object):
class FuncInfo(object):
def __init__(self, classname, name, cname, isconstructor, namespace, isclassmethod):
def __init__(self, classname, name, cname, isconstructor, namespace, is_static):
self.classname = classname
self.name = name
self.cname = cname
self.isconstructor = isconstructor
self.namespace = namespace
self.isclassmethod = isclassmethod
self.is_static = is_static
self.variants = []
def add_variant(self, decl, isphantom=False):
@ -555,8 +555,8 @@ class FuncInfo(object):
else:
classname = ""
if self.isclassmethod:
name += "_cls"
if self.is_static:
name += "_static"
return "pyopencv_" + self.namespace.replace('.','_') + '_' + classname + name
@ -615,7 +615,7 @@ class FuncInfo(object):
return Template(' {"$py_funcname", CV_PY_FN_WITH_KW_($wrap_funcname, $flags), "$py_docstring"},\n'
).substitute(py_funcname = self.variants[0].wname, wrap_funcname=self.get_wrapper_name(),
flags = 'METH_CLASS' if self.isclassmethod else '0', py_docstring = full_docstring)
flags = 'METH_STATIC' if self.is_static else '0', py_docstring = full_docstring)
def gen_code(self, codegen):
all_classes = codegen.classes
@ -632,7 +632,7 @@ class FuncInfo(object):
selfinfo = all_classes[self.classname]
if not self.isconstructor:
amp = "&" if selfinfo.issimple else ""
if self.isclassmethod:
if self.is_static:
pass
elif selfinfo.isalgorithm:
code += gen_template_check_self_algo.substitute(name=selfinfo.name, cname=selfinfo.cname, amp=amp)
@ -652,7 +652,7 @@ class FuncInfo(object):
all_cargs = []
parse_arglist = []
if v.isphantom and ismethod and not self.isclassmethod:
if v.isphantom and ismethod and not self.is_static:
code_args += "_self_"
# declare all the C function arguments,
@ -740,7 +740,7 @@ class FuncInfo(object):
if v.rettype:
code_decl += " " + v.rettype + " retval;\n"
code_fcall += "retval = "
if ismethod and not self.isclassmethod:
if ismethod and not self.is_static:
code_fcall += "_self_->" + self.cname
else:
code_fcall += self.cname
@ -821,7 +821,7 @@ class FuncInfo(object):
#if dump: pprint(vars(classinfo))
if self.isconstructor:
py_name = 'cv.' + classinfo.wname
elif self.isclassmethod:
elif self.is_static:
py_name = '.'.join([self.namespace, classinfo.sname + '_' + self.variants[0].wname])
else:
cname = classinfo.cname + '::' + cname
@ -929,12 +929,12 @@ class PythonWrapperGenerator(object):
namespace = '.'.join(namespace)
isconstructor = name == bareclassname
isclassmethod = False
is_static = False
isphantom = False
mappable = None
for m in decl[2]:
if m == "/S":
isclassmethod = True
is_static = True
elif m == "/phantom":
isphantom = True
cname = cname.replace("::", "_")
@ -948,10 +948,10 @@ class PythonWrapperGenerator(object):
if isconstructor:
name = "_".join(classes[:-1]+[name])
if isclassmethod:
if is_static:
# Add it as a method to the class
func_map = self.classes[classname].methods
func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, isclassmethod))
func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, is_static))
func.add_variant(decl, isphantom)
# Add it as global function
@ -966,7 +966,7 @@ class PythonWrapperGenerator(object):
else:
func_map = self.namespaces.setdefault(namespace, Namespace()).funcs
func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, isclassmethod))
func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, is_static))
func.add_variant(decl, isphantom)
if classname and isconstructor:

View File

@ -14,32 +14,235 @@ from tests_common import NewOpenCVTests
class cuda_test(NewOpenCVTests):
    """Tests for the Python wrappers of the CUDA (cv.cuda*) modules.

    Numeric tests compare the GPU result against the CPU (cv.*) or NumPy
    reference implementation; the *_existence tests only verify that the
    wrapped factory functions can be invoked without raising.
    All tests are skipped when no CUDA-capable device is available.
    """

    def setUp(self):
        super(cuda_test, self).setUp()
        # Every test needs a CUDA device; skip the whole fixture otherwise.
        if not cv.cuda.getCudaEnabledDeviceCount():
            self.skipTest("No CUDA-capable device is detected")

    def test_cuda_upload_download(self):
        # A GpuMat upload/download round-trip must preserve the host data.
        npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
        cuMat = cv.cuda_GpuMat()
        cuMat.upload(npMat)

        self.assertTrue(np.allclose(cuMat.download(), npMat))

    def test_cudaarithm_arithmetic(self):
        # Element-wise arithmetic on the GPU must match the CPU reference.
        npMat1 = np.random.random((128, 128, 3)) - 0.5
        npMat2 = np.random.random((128, 128, 3)) - 0.5

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        self.assertTrue(np.allclose(cv.cuda.add(cuMat1, cuMat2).download(),
                                    cv.add(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.subtract(cuMat1, cuMat2).download(),
                                    cv.subtract(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.multiply(cuMat1, cuMat2).download(),
                                    cv.multiply(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.divide(cuMat1, cuMat2).download(),
                                    cv.divide(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.absdiff(cuMat1, cuMat2).download(),
                                    cv.absdiff(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE).download(),
                                    cv.compare(npMat1, npMat2, cv.CMP_GE)))

        self.assertTrue(np.allclose(cv.cuda.abs(cuMat1).download(),
                                    np.abs(npMat1)))

        # sqrt(sqr(x)) == |x|
        self.assertTrue(np.allclose(cv.cuda.sqrt(cv.cuda.sqr(cuMat1)).download(),
                                    cv.cuda.abs(cuMat1).download()))

        # log(exp(x)) == x
        self.assertTrue(np.allclose(cv.cuda.log(cv.cuda.exp(cuMat1)).download(),
                                    npMat1))

        self.assertTrue(np.allclose(cv.cuda.pow(cuMat1, 2).download(),
                                    cv.pow(npMat1, 2)))

    def test_cudaarithm_logical(self):
        # Bitwise and min/max operations on the GPU must match the CPU reference.
        npMat1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
        npMat2 = (np.random.random((128, 128)) * 255).astype(np.uint8)

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        self.assertTrue(np.allclose(cv.cuda.bitwise_or(cuMat1, cuMat2).download(),
                                    cv.bitwise_or(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_and(cuMat1, cuMat2).download(),
                                    cv.bitwise_and(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_xor(cuMat1, cuMat2).download(),
                                    cv.bitwise_xor(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.bitwise_not(cuMat1).download(),
                                    cv.bitwise_not(npMat1)))

        self.assertTrue(np.allclose(cv.cuda.min(cuMat1, cuMat2).download(),
                                    cv.min(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.max(cuMat1, cuMat2).download(),
                                    cv.max(npMat1, npMat2)))

    def test_cudabgsegm_existence(self):
        #Test at least the existence of wrapped functions for now
        bgsub = cv.cuda.createBackgroundSubtractorMOG()
        bgsub = cv.cuda.createBackgroundSubtractorMOG2()

        self.assertTrue(True) #It is sufficient that no exceptions have been there

    def test_cudacodec_existence(self):
        #Test at least the existence of wrapped functions for now
        try:
            writer = cv.cudacodec.createVideoWriter("tmp", (128, 128), 30)
            reader = cv.cudacodec.createVideoReader("tmp")
        except cv.error as e:
            # The codec module may be built without NVCUVENC support.
            self.assertEqual(e.code, cv.Error.StsNotImplemented)
            self.skipTest("NVCUVENC is not installed")

        self.assertTrue(True) #It is sufficient that no exceptions have been there

    def test_cudafeatures2d(self):
        # Detect/describe/match on real images; only sanity-check match counts.
        npMat1 = self.get_sample("samples/data/right01.jpg")
        npMat2 = self.get_sample("samples/data/right02.jpg")

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        cuMat1 = cv.cuda.cvtColor(cuMat1, cv.COLOR_RGB2GRAY)
        cuMat2 = cv.cuda.cvtColor(cuMat2, cv.COLOR_RGB2GRAY)

        fast = cv.cuda_FastFeatureDetector.create()
        kps = fast.detectAsync(cuMat1)

        orb = cv.cuda_ORB.create()
        kps1, descs1 = orb.detectAndComputeAsync(cuMat1, None)
        kps2, descs2 = orb.detectAndComputeAsync(cuMat2, None)

        bf = cv.cuda_DescriptorMatcher.createBFMatcher(cv.NORM_HAMMING)
        matches = bf.match(descs1, descs2)
        self.assertGreater(len(matches), 0)
        matches = bf.knnMatch(descs1, descs2, 2)
        self.assertGreater(len(matches), 0)
        matches = bf.radiusMatch(descs1, descs2, 0.1)
        self.assertGreater(len(matches), 0)

        self.assertTrue(True) #It is sufficient that no exceptions have been there

    def test_cudafilters_existence(self):
        #Test at least the existence of wrapped functions for now
        filter = cv.cuda.createBoxFilter(cv.CV_8UC1, -1, (3, 3))
        filter = cv.cuda.createLinearFilter(cv.CV_8UC4, -1, np.eye(3))
        filter = cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3)
        filter = cv.cuda.createSeparableLinearFilter(cv.CV_8UC1, -1, np.eye(3), np.eye(3))
        filter = cv.cuda.createDerivFilter(cv.CV_8UC1, -1, 1, 1, 3)
        filter = cv.cuda.createSobelFilter(cv.CV_8UC1, -1, 1, 1)
        filter = cv.cuda.createScharrFilter(cv.CV_8UC1, -1, 1, 0)
        filter = cv.cuda.createGaussianFilter(cv.CV_8UC1, -1, (3, 3), 16)
        filter = cv.cuda.createMorphologyFilter(cv.MORPH_DILATE, cv.CV_32FC1, np.eye(3))
        filter = cv.cuda.createBoxMaxFilter(cv.CV_8UC1, (3, 3))
        filter = cv.cuda.createBoxMinFilter(cv.CV_8UC1, (3, 3))
        filter = cv.cuda.createRowSumFilter(cv.CV_8UC1, cv.CV_32FC1, 3)
        filter = cv.cuda.createColumnSumFilter(cv.CV_8UC1, cv.CV_32FC1, 3)
        filter = cv.cuda.createMedianFilter(cv.CV_8UC1, 3)

        self.assertTrue(True) #It is sufficient that no exceptions have been there

    def test_cudafilters_laplacian(self):
        # GPU Laplacian must match the CPU cv.Laplacian result.
        npMat = (np.random.random((128, 128)) * 255).astype(np.uint16)
        cuMat = cv.cuda_GpuMat()
        cuMat.upload(npMat)

        self.assertTrue(np.allclose(cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3).apply(cuMat).download(),
                                    cv.Laplacian(npMat, cv.CV_16UC1, ksize=3)))

    def test_cudaimgproc(self):
        # Broad smoke test of the cudaimgproc wrappers on 1/3/4-channel inputs.
        npC1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
        npC3 = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
        npC4 = (np.random.random((128, 128, 4)) * 255).astype(np.uint8)
        cuC1 = cv.cuda_GpuMat()
        cuC3 = cv.cuda_GpuMat()
        cuC4 = cv.cuda_GpuMat()
        cuC1.upload(npC1)
        cuC3.upload(npC3)
        cuC4.upload(npC4)

        cv.cuda.cvtColor(cuC3, cv.COLOR_RGB2HSV)
        cv.cuda.demosaicing(cuC1, cv.cuda.COLOR_BayerGR2BGR_MHT)
        cv.cuda.gammaCorrection(cuC3)
        cv.cuda.alphaComp(cuC4, cuC4, cv.cuda.ALPHA_XOR)
        cv.cuda.calcHist(cuC1)
        cv.cuda.equalizeHist(cuC1)
        cv.cuda.evenLevels(3, 0, 255)
        cv.cuda.meanShiftFiltering(cuC4, 10, 5)
        cv.cuda.meanShiftProc(cuC4, 10, 5)
        cv.cuda.bilateralFilter(cuC3, 3, 16, 3)
        cv.cuda.blendLinear  # attribute existence only

        cv.cuda.meanShiftSegmentation(cuC4, 10, 5, 5).download()

        clahe = cv.cuda.createCLAHE()
        clahe.apply(cuC1, cv.cuda_Stream.Null())

        histLevels = cv.cuda.histEven(cuC3, 20, 0, 255)
        cv.cuda.histRange(cuC1, histLevels)

        detector = cv.cuda.createCannyEdgeDetector(0, 100)
        detector.detect(cuC1)

        detector = cv.cuda.createHoughLinesDetector(3, np.pi / 180, 20)
        detector.detect(cuC1)

        detector = cv.cuda.createHoughSegmentDetector(3, np.pi / 180, 20, 5)
        detector.detect(cuC1)

        detector = cv.cuda.createHoughCirclesDetector(3, 20, 10, 10, 20, 100)
        detector.detect(cuC1)

        detector = cv.cuda.createGeneralizedHoughBallard()
        #BUG: detect accept only Mat!
        #Even if generate_gpumat_decls is set to True, it only wraps overload CUDA functions.
        #The problem is that Mat and GpuMat are not fully compatible to enable system-wide overloading
        #detector.detect(cuC1, cuC1, cuC1)

        detector = cv.cuda.createGeneralizedHoughGuil()
        #BUG: same as above..
        #detector.detect(cuC1, cuC1, cuC1)

        detector = cv.cuda.createHarrisCorner(cv.CV_8UC1, 15, 5, 1)
        detector.compute(cuC1)

        detector = cv.cuda.createMinEigenValCorner(cv.CV_8UC1, 15, 5, 1)
        detector.compute(cuC1)

        detector = cv.cuda.createGoodFeaturesToTrackDetector(cv.CV_8UC1)
        detector.detect(cuC1)

        matcher = cv.cuda.createTemplateMatching(cv.CV_8UC1, cv.TM_CCOEFF_NORMED)
        matcher.match(cuC3, cuC3)

        self.assertTrue(True) #It is sufficient that no exceptions have been there

    def test_cudaimgproc_cvtColor(self):
        # GPU color conversion must match the CPU cv.cvtColor result.
        npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
        cuMat = cv.cuda_GpuMat()
        cuMat.upload(npMat)

        self.assertTrue(np.allclose(cv.cuda.cvtColor(cuMat, cv.COLOR_BGR2HSV).download(),
                                    cv.cvtColor(npMat, cv.COLOR_BGR2HSV)))
# Run the OpenCV test bootstrap when executed as a script.
if __name__ == '__main__':
    NewOpenCVTests.bootstrap()