Merge branch 2.4

Andrey Kamaev 2013-01-17 18:36:57 +04:00
commit dc0788c864
43 changed files with 1425 additions and 331 deletions

View File

@ -551,7 +551,11 @@ foreach(m ${OPENCV_MODULES_DISABLED_AUTO})
list(APPEND __mdeps ${d})
endif()
endforeach()
list(APPEND OPENCV_MODULES_DISABLED_AUTO_ST "${m}(deps: ${__mdeps})")
if(__mdeps)
list(APPEND OPENCV_MODULES_DISABLED_AUTO_ST "${m}(deps: ${__mdeps})")
else()
list(APPEND OPENCV_MODULES_DISABLED_AUTO_ST "${m}")
endif()
endforeach()
string(REPLACE "opencv_" "" OPENCV_MODULES_DISABLED_AUTO_ST "${OPENCV_MODULES_DISABLED_AUTO_ST}")

View File

@ -174,6 +174,7 @@ endmacro()
# Internal macro; partly disables OpenCV module
macro(__ocv_module_turn_off the_module)
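# remove any previous occurrence first so the module ends up in OPENCV_MODULES_DISABLED_AUTO exactly once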
list(REMOVE_ITEM OPENCV_MODULES_DISABLED_AUTO "${the_module}")
list(APPEND OPENCV_MODULES_DISABLED_AUTO "${the_module}")
list(REMOVE_ITEM OPENCV_MODULES_BUILD "${the_module}")
list(REMOVE_ITEM OPENCV_MODULES_PUBLIC "${the_module}")
@ -193,7 +194,7 @@ macro(__ocv_flatten_module_required_dependencies the_module)
break()
elseif(";${OPENCV_MODULES_DISABLED_USER};${OPENCV_MODULES_DISABLED_AUTO};" MATCHES ";${__dep};")
__ocv_module_turn_off(${the_module}) # depends on disabled module
break()
list(APPEND __flattened_deps "${__dep}")
elseif(";${OPENCV_MODULES_BUILD};" MATCHES ";${__dep};")
if(";${__resolved_deps};" MATCHES ";${__dep};")
list(APPEND __flattened_deps "${__dep}") # all dependencies of this module are already resolved
@ -262,6 +263,7 @@ macro(__ocv_flatten_module_dependencies)
foreach(m ${OPENCV_MODULES_BUILD})
set(HAVE_${m} ON CACHE INTERNAL "Module ${m} will be built in current configuration")
__ocv_flatten_module_required_dependencies(${m})
set(OPENCV_MODULE_${m}_DEPS ${OPENCV_MODULE_${m}_DEPS} CACHE INTERNAL "Flattened required dependencies of ${m} module")
endforeach()
foreach(m ${OPENCV_MODULES_BUILD})
@ -286,7 +288,7 @@ macro(__ocv_flatten_module_dependencies)
ocv_list_unique(OPENCV_MODULES_BUILD_)
set(OPENCV_MODULES_PUBLIC ${OPENCV_MODULES_PUBLIC} CACHE INTERNAL "List of OpenCV modules marked for export")
set(OPENCV_MODULES_BUILD ${OPENCV_MODULES_BUILD_} CACHE INTERNAL "List of OpenCV modules included into the build")
set(OPENCV_MODULES_BUILD ${OPENCV_MODULES_BUILD_} CACHE INTERNAL "List of OpenCV modules included into the build")
set(OPENCV_MODULES_DISABLED_AUTO ${OPENCV_MODULES_DISABLED_AUTO} CACHE INTERNAL "List of OpenCV modules implicitly disabled due to dependencies")
endmacro()

View File

@ -16,7 +16,7 @@ Today it is common to have a digital video recording system at your disposal. Th
The source code
===============
As a test case for showing these off with OpenCV, I've created a small program that reads in two video files and performs a similarity check between them. This is something you could use to check just how well a new video compression algorithm works. Let there be a reference (original) video like :download:`this small Megamind clip <../../../../samples/cpp/tutorial_code/highgui/video-input-psnr-ssim/video/Megamind.avi>` and :download:`a compressed version of it <../../../../samples/cpp/tutorial_code/highgui/video-input-psnr-ssim/video/Megamind_bugy.avi>`. You may also find the source code and these video files in the :file:`samples/cpp/tutorial_code/highgui/video-input-psnr-ssim/` folder of the OpenCV source library.
As a test case for showing these off with OpenCV, I've created a small program that reads in two video files and performs a similarity check between them. This is something you could use to check just how well a new video compression algorithm works. Let there be a reference (original) video like :download:`this small Megamind clip <../../../../samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video/Megamind.avi>` and :download:`a compressed version of it <../../../../samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video/Megamind_bugy.avi>`. You may also find the source code and these video files in the :file:`samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/` folder of the OpenCV source library.
.. literalinclude:: ../../../../samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp
:language: cpp
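For orientation, here is a minimal sketch of the PSNR measure the sample computes between two 8-bit frames of equal size (the function name ``getPSNR`` is illustrative, not a verbatim excerpt of the referenced source). ::

#include <opencv2/core/core.hpp>
#include <cmath>

// PSNR between two same-sized 8-bit images; higher means more similar
double getPSNR(const cv::Mat& I1, const cv::Mat& I2)
{
    cv::Mat diff;
    cv::absdiff(I1, I2, diff);            // |I1 - I2|
    diff.convertTo(diff, CV_32F);
    diff = diff.mul(diff);                // squared per-element differences

    cv::Scalar s = cv::sum(diff);         // per-channel sums
    double sse = s[0] + s[1] + s[2];
    if (sse <= 1e-10)                     // frames are practically identical
        return 0;                         // PSNR would be infinite; return 0 by convention
    double mse = sse / (double)(I1.channels() * I1.total());
    return 10.0 * std::log10((255.0 * 255.0) / mse);
}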

File diff suppressed because one or more lines are too long

View File

@ -208,7 +208,7 @@ Retina::getMagno
Retina::getParameters
+++++++++++++++++++++
.. ocv:function:: struct Retina::RetinaParameters Retina::getParameters()
.. ocv:function:: Retina::RetinaParameters Retina::getParameters()
Retrieve the current parameter values in a *Retina::RetinaParameters* structure
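A short usage sketch follows; the parameter field name ``OPLandIplParvo.colorMode`` and the ``setup(RetinaParameters)`` overload are assumptions taken from the structure listed further below. ::

#include <opencv2/contrib/retina.hpp>

int main()
{
    cv::Retina retina(cv::Size(640, 480));                      // sketch: input frame size
    cv::Retina::RetinaParameters params = retina.getParameters();
    params.OPLandIplParvo.colorMode = false;                     // assumed field name, see struct listing below
    retina.setup(params);                                        // push the modified setup back
    return 0;
}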
@ -325,7 +325,7 @@ Retina::RetinaParameters
.. ocv:struct:: Retina::RetinaParameters
This structure merges all the parameters that can be adjusted through the **Retina::setup()**, **Retina::setupOPLandIPLParvoChannel** and **Retina::setupIPLMagnoChannel** setup methods
Parameters structure for better clarity; check the explanations in the comments of the methods setupOPLandIPLParvoChannel and setupIPLMagnoChannel. ::
Parameters structure for better clarity; check the explanations in the comments of the methods setupOPLandIPLParvoChannel and setupIPLMagnoChannel. ::
class RetinaParameters{
struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters

View File

@ -115,7 +115,7 @@ class CV_EXPORTS Retina {
public:
// parameters structure for better clarity, check the explanations in the comments of the methods setupOPLandIPLParvoChannel and setupIPLMagnoChannel
struct RetinaParameters{
struct RetinaParameters{
struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters
OPLandIplParvoParameters():colorMode(true),
normaliseOutput(true),
@ -166,14 +166,14 @@ public:
virtual ~Retina();
/**
* retrieve retina input buffer size
*/
Size inputSize();
* retrieve retina input buffer size
*/
Size inputSize();
/**
* retrieve retina output buffer size
*/
Size outputSize();
* retrieve retina output buffer size
*/
Size outputSize();
/**
* try to open an XML retina parameters file to adjust current retina instance setup
@ -190,9 +190,9 @@ public:
* => if the xml file does not exist, then default setup is applied
* => warning, Exceptions are thrown if read XML file is not valid
* @param fs : the open Filestorage which contains retina parameters
* @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
* @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
*/
void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true);
void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true);
/**
* try to open an XML retina parameters file to adjust current retina instance setup
@ -206,7 +206,7 @@ public:
/**
* @return the current parameters setup
*/
struct Retina::RetinaParameters getParameters();
Retina::RetinaParameters getParameters();
/**
* parameters setup display method
@ -305,17 +305,17 @@ public:
*/
void clearBuffers();
/**
* Activate/deactivate the Magnocellular pathway processing (motion information extraction), by default, it is activated
* @param activate: true if Magnocellular output should be activated, false if not
*/
void activateMovingContoursProcessing(const bool activate);
/**
* Activate/deactivate the Magnocellular pathway processing (motion information extraction), by default, it is activated
* @param activate: true if Magnocellular output should be activated, false if not
*/
void activateMovingContoursProcessing(const bool activate);
/**
* Activate/deactivate the Parvocellular pathway processing (contours information extraction), by default, it is activated
* @param activate: true if Parvocellular (contours information extraction) output should be activated, false if not
*/
void activateContoursProcessing(const bool activate);
/**
* Activate/deactivate the Parvocellular pathway processing (contours information extraction), by default, it is activated
* @param activate: true if Parvocellular (contours information extraction) output should be activated, false if not
*/
void activateContoursProcessing(const bool activate);
protected:
// Parameters setup members

View File

@ -210,9 +210,9 @@ The sample below demonstrates how to use RotatedRect:
.. seealso::
:ocv:cfunc:`CamShift`,
:ocv:func:`fitEllipse`,
:ocv:func:`minAreaRect`,
:ocv:func:`CamShift` ,
:ocv:func:`fitEllipse` ,
:ocv:func:`minAreaRect` ,
:ocv:struct:`CvBox2D`
TermCriteria
@ -1303,7 +1303,7 @@ because ``cvtColor`` , as well as the most of OpenCV functions, calls ``Mat::cre
Mat::addref
---------------
-----------
Increments the reference counter.
.. ocv:function:: void Mat::addref()
@ -1313,7 +1313,7 @@ The method increments the reference counter associated with the matrix data. If
Mat::release
----------------
------------
Decrements the reference counter and deallocates the matrix if needed.
.. ocv:function:: void Mat::release()
@ -1324,7 +1324,7 @@ The method decrements the reference counter associated with the matrix data. Whe
This method can be called manually to force the matrix data deallocation. But since this method is automatically called in the destructor, or by any other method that changes the data pointer, it is usually not needed. The reference counter decrement and check for 0 is an atomic operation on the platforms that support it. Thus, it is safe to operate on the same matrices asynchronously in different threads.
Mat::resize
---------------
-----------
Changes the number of matrix rows.
.. ocv:function:: void Mat::resize( size_t sz )
@ -1337,7 +1337,7 @@ The methods change the number of matrix rows. If the matrix is reallocated, the
Mat::reserve
---------------
------------
Reserves space for a certain number of rows.
.. ocv:function:: void Mat::reserve( size_t sz )
@ -1370,7 +1370,7 @@ The method removes one or more rows from the bottom of the matrix.
Mat::locateROI
------------------
--------------
Locates the matrix header within a parent matrix.
.. ocv:function:: void Mat::locateROI( Size& wholeSize, Point& ofs ) const
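A short sketch of how ``locateROI`` reports the parent geometry of a submatrix (the values in the comments follow from the ROI chosen here). ::

#include <opencv2/core/core.hpp>

int main()
{
    cv::Mat A(10, 10, CV_8U, cv::Scalar(0));
    cv::Mat roi = A(cv::Rect(2, 3, 4, 5));     // 4x5 view starting at (2,3)

    cv::Size wholeSize;
    cv::Point ofs;
    roi.locateROI(wholeSize, ofs);             // wholeSize == 10x10, ofs == (2,3)

    roi.adjustROI(1, 1, 1, 1);                 // grow the view by one pixel on every side
    return 0;
}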
@ -1387,7 +1387,7 @@ After you extracted a submatrix from a matrix using
Mat::adjustROI
------------------
--------------
Adjusts a submatrix size and position within the parent matrix.
.. ocv:function:: Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright )
@ -1417,7 +1417,7 @@ The function is used internally by the OpenCV filtering functions, like
Mat::operator()
-------------------
---------------
Extracts a rectangular submatrix.
.. ocv:function:: Mat Mat::operator()( Range rowRange, Range colRange ) const
@ -1955,202 +1955,6 @@ SparseMat
---------
.. ocv:class:: SparseMat
Sparse n-dimensional array. ::
class SparseMat
{
public:
typedef SparseMatIterator iterator;
typedef SparseMatConstIterator const_iterator;
// internal structure - sparse matrix header
struct Hdr
{
...
};
// sparse matrix node - element of a hash table
struct Node
{
size_t hashval;
size_t next;
int idx[CV_MAX_DIM];
};
////////// constructors and destructor //////////
// default constructor
SparseMat();
// creates matrix of the specified size and type
SparseMat(int dims, const int* _sizes, int _type);
// copy constructor
SparseMat(const SparseMat& m);
// converts dense array to the sparse form,
// if try1d is true and matrix is a single-column matrix (Nx1),
// then the sparse matrix will be 1-dimensional.
SparseMat(const Mat& m, bool try1d=false);
// converts an old-style sparse matrix to the new style.
// all the data is copied so that "m" can be safely
// deleted after the conversion
SparseMat(const CvSparseMat* m);
// destructor
~SparseMat();
///////// assignment operations ///////////
// this is an O(1) operation; no data is copied
SparseMat& operator = (const SparseMat& m);
// (equivalent to the corresponding constructor with try1d=false)
SparseMat& operator = (const Mat& m);
// creates a full copy of the matrix
SparseMat clone() const;
// copy all the data to the destination matrix.
// the destination will be reallocated if needed.
void copyTo( SparseMat& m ) const;
// converts 1D or 2D sparse matrix to dense 2D matrix.
// If the sparse matrix is 1D, the result will
// be a single-column matrix.
void copyTo( Mat& m ) const;
// converts arbitrary sparse matrix to dense matrix.
// multiplies all the matrix elements by the specified scalar
void convertTo( SparseMat& m, int rtype, double alpha=1 ) const;
// converts sparse matrix to dense matrix with optional type conversion and scaling.
// When rtype=-1, the destination element type will be the same
// as the sparse matrix element type.
// Otherwise, rtype will specify the depth and
// the number of channels will remain the same as in the sparse matrix
void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const;
// not used now
void assignTo( SparseMat& m, int type=-1 ) const;
// reallocates sparse matrix. If it was already of the proper size and type,
// it is simply cleared with clear(), otherwise,
// the old matrix is released (using release()) and the new one is allocated.
void create(int dims, const int* _sizes, int _type);
// sets all the matrix elements to 0, which means clearing the hash table.
void clear();
// manually increases reference counter to the header.
void addref();
// decreases the header reference counter; when it reaches 0,
// the header and all the underlying data are deallocated.
void release();
// converts sparse matrix to the old-style representation.
// all the elements are copied.
operator CvSparseMat*() const;
// size of each element in bytes
// (the matrix nodes will be bigger because of
// element indices and other SparseMat::Node elements).
size_t elemSize() const;
// elemSize()/channels()
size_t elemSize1() const;
// the same is in Mat
int type() const;
int depth() const;
int channels() const;
// returns the array of sizes and 0 if the matrix is not allocated
const int* size() const;
// returns i-th size (or 0)
int size(int i) const;
// returns the matrix dimensionality
int dims() const;
// returns the number of non-zero elements
size_t nzcount() const;
// compute element hash value from the element indices:
// 1D case
size_t hash(int i0) const;
// 2D case
size_t hash(int i0, int i1) const;
// 3D case
size_t hash(int i0, int i1, int i2) const;
// n-D case
size_t hash(const int* idx) const;
// low-level element-access functions,
// special variants for 1D, 2D, 3D cases, and the generic one for n-D case.
//
// return pointer to the matrix element.
// if the element is there (it is non-zero), the pointer to it is returned
// if it is not there and createMissing=false, NULL pointer is returned
// if it is not there and createMissing=true, the new element
// is created and initialized with 0. Pointer to it is returned.
// If the optional hashval pointer is not NULL, the element hash value is
// not computed but *hashval is taken instead.
uchar* ptr(int i0, bool createMissing, size_t* hashval=0);
uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0);
uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0);
uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0);
// higher-level element access functions:
// ref<_Tp>(i0,...[,hashval]) - equivalent to *(_Tp*)ptr(i0,...true[,hashval]).
// always return valid reference to the element.
// If it does not exist, it is created.
// find<_Tp>(i0,...[,hashval]) - equivalent to (const _Tp*)ptr(i0,...false[,hashval]).
// return pointer to the element or NULL pointer if the element is not there.
// value<_Tp>(i0,...[,hashval]) - equivalent to
// { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); }
// that is, 0 is returned when the element is not there.
// note that _Tp must match the actual matrix type -
// the functions do not do any on-fly type conversion
// 1D case
template<typename _Tp> _Tp& ref(int i0, size_t* hashval=0);
template<typename _Tp> _Tp value(int i0, size_t* hashval=0) const;
template<typename _Tp> const _Tp* find(int i0, size_t* hashval=0) const;
// 2D case
template<typename _Tp> _Tp& ref(int i0, int i1, size_t* hashval=0);
template<typename _Tp> _Tp value(int i0, int i1, size_t* hashval=0) const;
template<typename _Tp> const _Tp* find(int i0, int i1, size_t* hashval=0) const;
// 3D case
template<typename _Tp> _Tp& ref(int i0, int i1, int i2, size_t* hashval=0);
template<typename _Tp> _Tp value(int i0, int i1, int i2, size_t* hashval=0) const;
template<typename _Tp> const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const;
// n-D case
template<typename _Tp> _Tp& ref(const int* idx, size_t* hashval=0);
template<typename _Tp> _Tp value(const int* idx, size_t* hashval=0) const;
template<typename _Tp> const _Tp* find(const int* idx, size_t* hashval=0) const;
// erase the specified matrix element.
// when there is no such element, the methods do nothing
void erase(int i0, int i1, size_t* hashval=0);
void erase(int i0, int i1, int i2, size_t* hashval=0);
void erase(const int* idx, size_t* hashval=0);
// return the matrix iterators,
// pointing to the first sparse matrix element,
SparseMatIterator begin();
SparseMatConstIterator begin() const;
// ... or to the point after the last sparse matrix element
SparseMatIterator end();
SparseMatConstIterator end() const;
// and the template forms of the above methods.
// _Tp must match the actual matrix type.
template<typename _Tp> SparseMatIterator_<_Tp> begin();
template<typename _Tp> SparseMatConstIterator_<_Tp> begin() const;
template<typename _Tp> SparseMatIterator_<_Tp> end();
template<typename _Tp> SparseMatConstIterator_<_Tp> end() const;
// return value stored in the sparse matrix node
template<typename _Tp> _Tp& value(Node* n);
template<typename _Tp> const _Tp& value(const Node* n) const;
////////////// some internally used methods ///////////////
...
// pointer to the sparse matrix header
Hdr* hdr;
};
The class ``SparseMat`` represents multi-dimensional sparse numerical arrays. Such a sparse array can store elements of any type that
:ocv:class:`Mat` can store. *Sparse* means that only non-zero elements are stored (though, as a result of operations on a sparse matrix, some of its stored elements can actually become 0. It is up to you to detect such elements and delete them using ``SparseMat::erase`` ). The non-zero elements are stored in a hash table that grows when it is filled, so that the search time is O(1) on average (regardless of whether an element is there or not). Elements can be accessed using the following methods:
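For example, a minimal access sketch using the templated ``ref``, ``value``, and ``find`` accessors (a 3-D ``CV_32F`` array is assumed here). ::

#include <opencv2/core/core.hpp>

int main()
{
    const int size[] = {10, 10, 10};
    cv::SparseMat sm(3, size, CV_32F);         // empty 3-D sparse array of floats

    sm.ref<float>(1, 2, 3) = 7.5f;             // creates the element if it is missing
    float v  = sm.value<float>(1, 2, 3);       // 7.5
    float v0 = sm.value<float>(4, 5, 6);       // 0: element not stored, default returned
    const float* p = sm.find<float>(4, 5, 6);  // NULL: element not stored

    (void)v; (void)v0; (void)p;
    return 0;
}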
@ -2231,6 +2035,204 @@ The class ``SparseMat`` represents multi-dimensional sparse numerical arrays. Su
..
SparseMat::SparseMat
--------------------
Various SparseMat constructors.
.. ocv:function:: SparseMat::SparseMat()
.. ocv:function:: SparseMat::SparseMat(int dims, const int* _sizes, int _type)
.. ocv:function:: SparseMat::SparseMat(const SparseMat& m)
.. ocv:function:: SparseMat::SparseMat(const Mat& m, bool try1d=false)
.. ocv:function:: SparseMat::SparseMat(const CvSparseMat* m)
:param m: Source matrix for copy constructor. If ``m`` is a dense matrix (:ocv:class:`Mat`), it is converted to the sparse representation.
:param dims: Array dimensionality.
:param _sizes: Sparse matrix size in each dimension.
:param _type: Sparse matrix data type.
:param try1d: if try1d is true and matrix is a single-column matrix (Nx1), then the sparse matrix will be 1-dimensional.
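A hedged constructor sketch; the only non-trivial case is conversion from a dense ``Mat``. ::

#include <opencv2/core/core.hpp>

int main()
{
    cv::Mat dense = cv::Mat::eye(5, 5, CV_32F);   // mostly zeros
    cv::SparseMat sparse(dense);                   // stores only the 5 non-zero elements

    cv::Mat col(100, 1, CV_32F, cv::Scalar(0));
    col.at<float>(42, 0) = 1.f;
    cv::SparseMat sparse1d(col, true);             // try1d=true: the result is 1-dimensional
    return 0;
}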
SparseMat::~SparseMat
---------------------
SparseMat object destructor.
.. ocv:function:: SparseMat::~SparseMat()
SparseMat::operator =
---------------------
Provides sparse matrix assignment operators.
.. ocv:function:: SparseMat& SparseMat::operator=(const SparseMat& m)
.. ocv:function:: SparseMat& SparseMat::operator=(const Mat& m)
The last variant is equivalent to the corresponding constructor with try1d=false.
SparseMat::clone
----------------
Creates a full copy of the matrix.
.. ocv:function:: SparseMat SparseMat::clone() const
SparseMat::copyTo
-----------------
Copies all the data to the destination matrix. The destination is reallocated if needed.
.. ocv:function:: void SparseMat::copyTo( SparseMat& m ) const
.. ocv:function:: void SparseMat::copyTo( Mat& m ) const
:param m: Target for copying.
The last variant converts a 1D or 2D sparse matrix to a dense 2D matrix. If the sparse matrix is 1D, the result will be a single-column matrix.
SparseMat::convertTo
--------------------
Converts a sparse matrix with optional type conversion and scaling.
.. ocv:function:: void SparseMat::convertTo( SparseMat& m, int rtype, double alpha=1 ) const
.. ocv:function:: void SparseMat::convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const
The first version converts the sparse matrix to another sparse matrix of the specified type, multiplying all the matrix elements by the specified scalar.
The second version converts the sparse matrix to a dense matrix with optional type conversion and scaling.
When rtype=-1, the destination element type will be the same as the sparse matrix element type.
Otherwise, rtype will specify the depth and the number of channels will remain the same as in the sparse matrix.
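For instance, a small sketch of scaling while converting to a dense matrix (values in the comments follow from the chosen inputs). ::

#include <opencv2/core/core.hpp>

int main()
{
    const int size[] = {4, 4};
    cv::SparseMat sm(2, size, CV_32F);
    sm.ref<float>(0, 0) = 2.f;

    cv::Mat dense;
    sm.convertTo(dense, CV_64F, 0.5);   // dense(0,0) == 1.0, all other elements stay 0
    return 0;
}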
SparseMat::create
-----------------
Reallocates sparse matrix. If it was already of the proper size and type, it is simply cleared with clear(), otherwise,
the old matrix is released (using release()) and the new one is allocated.
.. ocv:function:: void SparseMat::create(int dims, const int* _sizes, int _type)
:param dims: Array dimensionality.
:param _sizes: Sparse matrix size in each dimension.
:param _type: Sparse matrix data type.
SparseMat::clear
----------------
Sets all the matrix elements to 0, which means clearing the hash table.
.. ocv:function:: void SparseMat::clear()
SparseMat::addref
-----------------
Manually increments the reference counter of the header.
.. ocv:function:: void SparseMat::addref()
SparseMat::release
------------------
Decrements the header reference counter. When it reaches 0, the header and all the underlying data are deallocated.
.. ocv:function:: void SparseMat::release()
SparseMat::CvSparseMat *
------------------------
Converts sparse matrix to the old-style representation. All the elements are copied.
.. ocv:function:: SparseMat::operator CvSparseMat*() const
SparseMat::elemSize
-------------------
Size of each element in bytes (the matrix nodes will be bigger because of element indices and other SparseMat::Node elements).
.. ocv:function:: size_t SparseMat::elemSize() const
SparseMat::elemSize1
--------------------
elemSize()/channels().
.. ocv:function:: size_t SparseMat::elemSize1() const
SparseMat::type
---------------
Returns the type of a matrix element.
.. ocv:function:: int SparseMat::type() const
The method returns a sparse matrix element type. This is an identifier compatible with the ``CvMat`` type system, like ``CV_16SC3`` (a 16-bit signed 3-channel array), and so on.
SparseMat::depth
----------------
Returns the depth of a sparse matrix element.
.. ocv:function:: int SparseMat::depth() const
The method returns the identifier of the matrix element depth (the type of each individual channel). For example, for a 16-bit signed 3-channel array, the method returns ``CV_16S``
* ``CV_8U`` - 8-bit unsigned integers ( ``0..255`` )
* ``CV_8S`` - 8-bit signed integers ( ``-128..127`` )
* ``CV_16U`` - 16-bit unsigned integers ( ``0..65535`` )
* ``CV_16S`` - 16-bit signed integers ( ``-32768..32767`` )
* ``CV_32S`` - 32-bit signed integers ( ``-2147483648..2147483647`` )
* ``CV_32F`` - 32-bit floating-point numbers ( ``-FLT_MAX..FLT_MAX, INF, NAN`` )
* ``CV_64F`` - 64-bit floating-point numbers ( ``-DBL_MAX..DBL_MAX, INF, NAN`` )
SparseMat::channels
-------------------
Returns the number of matrix channels.
.. ocv:function:: int SparseMat::channels() const
The method returns the number of matrix channels.
SparseMat::size
---------------
Returns the array of sizes (first variant) or the matrix size along dimension ``i`` (second variant); returns 0 if the matrix is not allocated.
.. ocv:function:: const int* SparseMat::size() const
.. ocv:function:: int SparseMat::size(int i) const
:param i: Dimension index.
SparseMat::dims
---------------
Returns the matrix dimensionality.
.. ocv:function:: int SparseMat::dims() const
SparseMat::nzcount
------------------
Returns the number of non-zero elements.
.. ocv:function:: size_t SparseMat::nzcount() const
SparseMat::hash
---------------
Computes the element hash value from the element indices.
.. ocv:function:: size_t SparseMat::hash(int i0) const
.. ocv:function:: size_t SparseMat::hash(int i0, int i1) const
.. ocv:function:: size_t SparseMat::hash(int i0, int i1, int i2) const
.. ocv:function:: size_t SparseMat::hash(const int* idx) const
SparseMat::ptr
--------------
Low-level element-access functions, special variants for 1D, 2D, 3D cases, and the generic one for n-D case.
.. ocv:function:: uchar* SparseMat::ptr(int i0, bool createMissing, size_t* hashval=0)
.. ocv:function:: uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval=0)
.. ocv:function:: uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0)
.. ocv:function:: uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval=0)
Returns a pointer to the matrix element. If the element is there (it is non-zero), the pointer to it is returned.
If it is not there and ``createMissing=false``, a NULL pointer is returned. If it is not there and ``createMissing=true``,
the new element is created and initialized with 0, and a pointer to it is returned. If the optional ``hashval`` pointer is not ``NULL``,
the element hash value is not computed but ``*hashval`` is taken instead.
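A low-level sketch; reusing a precomputed ``hashval`` is optional and only matters for repeated access to the same element. ::

#include <opencv2/core/core.hpp>

int main()
{
    const int size[] = {8, 8};
    cv::SparseMat sm(2, size, CV_32F);

    uchar* p = sm.ptr(2, 3, false);            // NULL: element does not exist yet
    uchar* q = sm.ptr(2, 3, true);             // created and zero-initialized
    *(float*)q = 3.f;

    size_t h = sm.hash(2, 3);                  // precompute the hash once
    uchar* r = sm.ptr(2, 3, false, &h);        // same element, hash not recomputed

    (void)p; (void)r;
    return 0;
}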
SparseMat::erase
----------------
Erases the specified matrix element. When there is no such element, the methods do nothing.
.. ocv:function:: void SparseMat::erase(int i0, int i1, size_t* hashval=0)
.. ocv:function:: void SparseMat::erase(int i0, int i1, int i2, size_t* hashval=0)
.. ocv:function:: void SparseMat::erase(const int* idx, size_t* hashval=0)
SparseMat\_
-----------
.. ocv:class:: SparseMat_
@ -2286,7 +2288,7 @@ Template sparse n-dimensional array class derived from
SparseMatConstIterator_<_Tp> end() const;
};
``SparseMat_`` is a thin wrapper on top of :ocv:class:`SparseMat` created in the same way as ``Mat_`` .
``SparseMat_`` is a thin wrapper on top of :ocv:class:`SparseMat` created in the same way as ``Mat_`` .
It simplifies notation of some operations. ::
int sz[] = {10, 20, 30};
@ -2340,6 +2342,11 @@ Here is example of SIFT use in your application via Algorithm interface: ::
vector<KeyPoint> keypoints;
(*sift)(image, noArray(), keypoints, descriptors);
Algorithm::name
---------------
Returns the algorithm name
.. ocv:function:: string Algorithm::name() const
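A brief sketch, following the SIFT-via-Algorithm example above; the nonfree module and ``initModule_nonfree()`` call are assumptions needed to register the algorithm with the factory. ::

#include <opencv2/nonfree/nonfree.hpp>
#include <iostream>

int main()
{
    cv::initModule_nonfree();                 // registers SIFT/SURF with the Algorithm factory
    cv::Ptr<cv::Feature2D> sift = cv::Algorithm::create<cv::Feature2D>("Feature2D.SIFT");
    std::cout << sift->name() << std::endl;   // prints "Feature2D.SIFT"
    return 0;
}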
Algorithm::get
--------------

View File

@ -166,6 +166,12 @@ field of the set is the total number of nodes both occupied and free. When an oc
``CvSet`` is used to represent graphs (:ocv:struct:`CvGraph`), sparse multi-dimensional arrays (:ocv:struct:`CvSparseMat`), and planar subdivisions (:ocv:struct:`CvSubdiv2D`).
CvSetElem
---------
.. ocv:struct:: CvSetElem
The structure represents a single element of :ocv:struct:`CvSet`. It consists of two fields: an element data pointer and flags.
CvGraph
-------
@ -174,6 +180,24 @@ CvGraph
The structure ``CvGraph`` is a base for graphs used in OpenCV 1.x. It inherits from
:ocv:struct:`CvSet`, that is, it is considered as a set of vertices. Besides, it contains another set as a member, a set of graph edges. Graphs in OpenCV are represented using adjacency lists format.
CvGraphVtx
----------
.. ocv:struct:: CvGraphVtx
The structure represents a single vertex in :ocv:struct:`CvGraph`. It consists of two fields: a pointer to the first edge and flags.
CvGraphEdge
-----------
.. ocv:struct:: CvGraphEdge
The structure represents an edge in :ocv:struct:`CvGraph`. Each edge consists of (see the sketch after this list):
- Two pointers to the starting and ending vertices (vtx[0] and vtx[1] respectively);
- Two pointers to next edges for the starting and ending vertices, where
next[0] points to the next edge in the vtx[0] adjacency list and
next[1] points to the next edge in the vtx[1] adjacency list;
- Weight;
- Flags.
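A rough C-style sketch of this layout; the field order follows the OpenCV 1.x headers but should be treated as illustrative rather than authoritative. ::

typedef struct CvGraphEdge
{
    int                  flags;    // edge flags
    float                weight;   // edge weight
    struct CvGraphEdge*  next[2];  // next edges in the vtx[0] / vtx[1] adjacency lists
    struct CvGraphVtx*   vtx[2];   // starting (vtx[0]) and ending (vtx[1]) vertices
} CvGraphEdge;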
CvGraphScanner
--------------

View File

@ -3241,7 +3241,7 @@ The constructors.
* **SVD::NO_UV** indicates that only a vector of singular values ``w`` is to be processed, while ``u`` and ``vt`` will be set to empty matrices.
* **SVD::FULL_UV** when the matrix is not square, by default the algorithm produces ``u`` and ``vt`` matrices of sufficiently large size for the further ``A`` reconstruction; if, however, ``FULL_UV`` flag is specified, ``u`` and ``vt``will be full-size square orthogonal matrices.
* **SVD::FULL_UV** when the matrix is not square, by default the algorithm produces ``u`` and ``vt`` matrices of sufficiently large size for the further ``A`` reconstruction; if, however, ``FULL_UV`` flag is specified, ``u`` and ``vt`` will be full-size square orthogonal matrices.
The first constructor initializes an empty ``SVD`` structure. The second constructor initializes an empty ``SVD`` structure and then calls
:ocv:funcx:`SVD::operator()` .

View File

@ -113,7 +113,7 @@ Detects keypoints in an image (first variant) or image set (second variant).
:param masks: Masks for each input image specifying where to look for keypoints (optional). ``masks[i]`` is a mask for ``images[i]``.
FeatureDetector::create
---------------------------
-----------------------
Creates a feature detector by its name.
.. ocv:function:: Ptr<FeatureDetector> FeatureDetector::create( const string& detectorType )
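For instance, a minimal sketch using the ``"FAST"`` detector name (the dummy image is just a placeholder for this sketch). ::

#include <opencv2/features2d/features2d.hpp>
#include <vector>

int main()
{
    cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("FAST");
    cv::Mat image(200, 200, CV_8U, cv::Scalar(0));     // dummy image for the sketch
    std::vector<cv::KeyPoint> keypoints;
    detector->detect(image, keypoints);
    return 0;
}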
@ -219,8 +219,7 @@ StarFeatureDetector
-------------------
.. ocv:class:: StarFeatureDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:class:`StarDetector` class. ::
The class implements the keypoint detector introduced by K. Konolige; it is a synonym of ``StarDetector``. ::
class StarFeatureDetector : public FeatureDetector
{
@ -412,7 +411,7 @@ Example of creating ``DynamicAdaptedFeatureDetector`` : ::
DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector
----------------------------------------------------------------
------------------------------------------------------------
The constructor
.. ocv:function:: DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 )
@ -484,7 +483,7 @@ Example: ::
AdjusterAdapter::good
-------------------------
---------------------
Returns false if the detector parameters cannot be adjusted any more.
.. ocv:function:: bool AdjusterAdapter::good() const
@ -497,7 +496,7 @@ Example: ::
}
AdjusterAdapter::create
-------------------------
-----------------------
Creates an adjuster adapter by name
.. ocv:function:: Ptr<AdjusterAdapter> AdjusterAdapter::create( const string& detectorType )
@ -528,3 +527,23 @@ StarAdjuster
StarAdjuster(double initial_thresh = 30.0);
...
};
SurfAdjuster
------------
.. ocv:class:: SurfAdjuster: public AdjusterAdapter
:ocv:class:`AdjusterAdapter` for ``SurfFeatureDetector``. ::
class CV_EXPORTS SurfAdjuster: public AdjusterAdapter
{
public:
SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 );
virtual void tooFew(int minv, int n_detected);
virtual void tooMany(int maxv, int n_detected);
virtual bool good() const;
virtual Ptr<AdjusterAdapter> clone() const;
...
};

View File

@ -6,6 +6,7 @@ Fast Approximate Nearest Neighbor Search
This section documents OpenCV's interface to the FLANN library. FLANN (Fast Library for Approximate Nearest Neighbors) is a library that contains a collection of algorithms optimized for fast nearest neighbor search in large datasets and for high dimensional features. More information about FLANN can be found in [Muja2009]_ .
.. [Muja2009] Marius Muja, David G. Lowe. Fast Approximate Nearest Neighbors with Automatic Algorithm Configuration, 2009
flann::Index\_
-----------------
@ -150,7 +151,7 @@ flann::Index_<T>::knnSearch
----------------------------
Performs a K-nearest neighbor search for a given query point using the index.
.. ocv:function:: void flann::Index_<T>::knnSearch(const vector<T>& query, vector<int>& indices, vector<float>& dists, int knn, const SearchParams& params)
.. ocv:function:: void flann::Index_<T>::knnSearch(const vector<T>& query, vector<int>& indices, vector<float>& dists, int knn, const SearchParams& params)
.. ocv:function:: void flann::Index_<T>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const SearchParams& params)
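A hedged usage sketch; it uses the non-templated ``cv::flann::Index`` front-end (whose calls mirror the templated ones documented here), and the index parameters and random data are assumptions for illustration only. ::

#include <opencv2/core/core.hpp>
#include <opencv2/flann/miniflann.hpp>

int main()
{
    cv::Mat features(100, 2, CV_32F);
    cv::randu(features, cv::Scalar(0), cv::Scalar(1));       // 100 random 2-D points

    cv::flann::Index index(features, cv::flann::KDTreeIndexParams(4));

    cv::Mat query(1, 2, CV_32F, cv::Scalar(0.5f));            // one query point
    cv::Mat indices, dists;
    index.knnSearch(query, indices, dists, 3, cv::flann::SearchParams(32));
    return 0;
}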
@ -179,9 +180,9 @@ flann::Index_<T>::radiusSearch
--------------------------------------
Performs a radius nearest neighbor search for a given query point.
.. ocv:function:: int flann::Index_<T>::radiusSearch(const vector<T>& query, vector<int>& indices, vector<float>& dists, float radius, const SearchParams& params)
.. ocv:function:: int flann::Index_<T>::radiusSearch(const vector<T>& query, vector<int>& indices, vector<float>& dists, float radius, const SearchParams& params)
.. ocv:function:: int flann::Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, float radius, const SearchParams& params)
.. ocv:function:: int flann::Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, float radius, const SearchParams& params)
:param query: The query point

View File

@ -6,7 +6,7 @@ Initialization and Information
gpu::getCudaEnabledDeviceCount
----------------------------------
------------------------------
Returns the number of installed CUDA-enabled devices.
.. ocv:function:: int gpu::getCudaEnabledDeviceCount()
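A short device-enumeration sketch; selecting device 0 at the end is just an example choice. ::

#include <opencv2/gpu/gpu.hpp>
#include <iostream>

int main()
{
    int n = cv::gpu::getCudaEnabledDeviceCount();   // 0 if OpenCV was built without CUDA support
    for (int i = 0; i < n; ++i)
    {
        cv::gpu::DeviceInfo info(i);
        std::cout << i << ": " << info.name()
                  << " (compatible: " << info.isCompatible() << ")" << std::endl;
    }
    if (n > 0)
        cv::gpu::setDevice(0);                      // make device 0 current for this thread
    return 0;
}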
@ -16,7 +16,7 @@ Use this function before any other GPU functions calls. If OpenCV is compiled wi
gpu::setDevice
------------------
--------------
Sets a device and initializes it for the current thread.
.. ocv:function:: void gpu::setDevice(int device)
@ -28,7 +28,7 @@ If the call of this function is omitted, a default device is initialized at the
gpu::getDevice
------------------
--------------
Returns the current device index set by :ocv:func:`gpu::setDevice` or initialized by default.
.. ocv:function:: int gpu::getDevice()
@ -36,7 +36,7 @@ Returns the current device index set by :ocv:func:`gpu::setDevice` or initialize
gpu::resetDevice
------------------
----------------
Explicitly destroys and cleans up all resources associated with the current device in the current process.
.. ocv:function:: void gpu::resetDevice()
@ -47,8 +47,7 @@ Any subsequent API call to this device will reinitialize the device.
gpu::FeatureSet
---------------
Class providing GPU computing features. ::
Enumeration providing GPU computing features. ::
enum FeatureSet
{
@ -64,7 +63,6 @@ Class providing GPU computing features. ::
};
gpu::TargetArchs
----------------
.. ocv:class:: gpu::TargetArchs
@ -132,7 +130,7 @@ Class providing functionality for querying the specified GPU properties. ::
gpu::DeviceInfo::DeviceInfo
-------------------------------
---------------------------
The constructors.
.. ocv:function:: gpu::DeviceInfo::DeviceInfo()
@ -146,7 +144,7 @@ Constructs the ``DeviceInfo`` object for the specified device. If ``device_id``
gpu::DeviceInfo::name
-------------------------
---------------------
Returns the device name.
.. ocv:function:: string gpu::DeviceInfo::name() const
@ -154,7 +152,7 @@ Returns the device name.
gpu::DeviceInfo::majorVersion
---------------------------------
-----------------------------
Returns the major compute capability version.
.. ocv:function:: int gpu::DeviceInfo::majorVersion()
@ -162,7 +160,7 @@ Returns the major compute capability version.
gpu::DeviceInfo::minorVersion
---------------------------------
-----------------------------
Returns the minor compute capability version.
.. ocv:function:: int gpu::DeviceInfo::minorVersion()
@ -170,7 +168,7 @@ Returns the minor compute capability version.
gpu::DeviceInfo::multiProcessorCount
----------------------------------------
------------------------------------
Returns the number of streaming multiprocessors.
.. ocv:function:: int gpu::DeviceInfo::multiProcessorCount()
@ -178,7 +176,7 @@ Returns the number of streaming multiprocessors.
gpu::DeviceInfo::freeMemory
-------------------------------
---------------------------
Returns the amount of free memory in bytes.
.. ocv:function:: size_t gpu::DeviceInfo::freeMemory()
@ -186,7 +184,7 @@ Returns the amount of free memory in bytes.
gpu::DeviceInfo::totalMemory
--------------------------------
----------------------------
Returns the amount of total memory in bytes.
.. ocv:function:: size_t gpu::DeviceInfo::totalMemory()
@ -194,7 +192,7 @@ Returns the amount of total memory in bytes.
gpu::DeviceInfo::supports
-----------------------------
-------------------------
Provides information on GPU feature support.
.. ocv:function:: bool gpu::DeviceInfo::supports( FeatureSet feature_set ) const
@ -206,7 +204,7 @@ This function returns ``true`` if the device has the specified GPU feature. Othe
gpu::DeviceInfo::isCompatible
---------------------------------
-----------------------------
Checks the GPU module and device compatibility.
.. ocv:function:: bool gpu::DeviceInfo::isCompatible()
@ -216,7 +214,7 @@ This function returns ``true`` if the GPU module can be run on the specified dev
gpu::DeviceInfo::deviceID
---------------------------------
-------------------------
Returns system index of the GPU device starting with 0.
.. ocv:function:: int gpu::DeviceInfo::deviceID()

View File

@ -785,7 +785,7 @@ Constructors.
:param fps: Framerate of the created video stream.
:param params: Encoder parameters. See :ocv:class:`gpu::VideoWriter_GPU::EncoderParams` .
:param params: Encoder parameters. See :ocv:struct:`gpu::VideoWriter_GPU::EncoderParams` .
:param format: Surface format of input frames ( ``SF_UYVY`` , ``SF_YUY2`` , ``SF_YV12`` , ``SF_NV12`` , ``SF_IYUV`` , ``SF_BGR`` or ``SF_GRAY``). BGR or gray frames will be converted to YV12 format before encoding, frames with other formats will be used as is.

View File

@ -295,7 +295,12 @@ endif(ANDROID)
add_dependencies(${lib_target} ${api_target})
add_dependencies(${the_module} ${lib_target})
#android test project
# android test project
if(ANDROID AND BUILD_TESTS)
add_subdirectory(android_test)
endif()
# Desktop Java test project.
if((NOT ANDROID) AND BUILD_TESTS)
add_subdirectory(java_test)
endif()

View File

@ -1,8 +1,10 @@
package org.opencv.test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
@ -426,24 +428,19 @@ public class OpenCVTestCase extends TestCase {
}
protected static String readFile(String path) {
FileInputStream stream = null;
try {
stream = new FileInputStream(new File(path));
FileChannel fc = stream.getChannel();
MappedByteBuffer bb = fc.map(FileChannel.MapMode.READ_ONLY, 0,
fc.size());
return Charset.defaultCharset().decode(bb).toString();
BufferedReader br = new BufferedReader(new FileReader(path));
String line;
StringBuffer result = new StringBuffer();
while ((line = br.readLine()) != null) {
result.append(line);
result.append("\n");
}
return result.toString();
} catch (IOException e) {
OpenCVTestRunner.Log("Failed to read file \"" + path
+ "\". Exception is thrown: " + e);
return null;
} finally {
if (stream != null)
try {
stream.close();
} catch (IOException e) {
OpenCVTestRunner.Log("Exception is thrown: " + e);
}
}
}

View File

@ -77,7 +77,8 @@ public class TermCriteriaTest extends OpenCVTestCase {
public void testToString() {
String actual = tc2.toString();
String expected = "{ type: 2, maxCount: 4, epsilon: 0.001}";
double eps = EPS;
String expected = "{ type: 2, maxCount: 4, epsilon: " + eps + "}";
assertEquals(expected, actual);
}

View File

@ -26,10 +26,9 @@ public class BRIEFDescriptorExtractorTest extends OpenCVTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
extractor = DescriptorExtractor.create(DescriptorExtractor.BRIEF);
matSize = 100;
super.setUp();
}
public void testComputeListOfMatListOfListOfKeyPointListOfMat() {

View File

@ -80,6 +80,7 @@ public class BruteForceDescriptorMatcherTest extends OpenCVTestCase {
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
matSize = 100;
@ -90,8 +91,6 @@ public class BruteForceDescriptorMatcherTest extends OpenCVTestCase {
new DMatch(3, 1, 0, 0.48435235f),
new DMatch(4, 0, 0, 1.0836693f)
};
super.setUp();
}
public void testAdd() {

View File

@ -65,6 +65,7 @@ public class BruteForceHammingDescriptorMatcherTest extends OpenCVTestCase {
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
matSize = 100;
@ -73,7 +74,6 @@ public class BruteForceHammingDescriptorMatcherTest extends OpenCVTestCase {
new DMatch(1, 2, 0, 42),
new DMatch(2, 1, 0, 40),
new DMatch(3, 3, 0, 53) };
super.setUp();
}
public void testAdd() {

View File

@ -65,6 +65,7 @@ public class BruteForceHammingLUTDescriptorMatcherTest extends OpenCVTestCase {
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
matSize = 100;
@ -73,7 +74,6 @@ public class BruteForceHammingLUTDescriptorMatcherTest extends OpenCVTestCase {
new DMatch(1, 2, 0, 42),
new DMatch(2, 1, 0, 40),
new DMatch(3, 3, 0, 53) };
super.setUp();
}
public void testAdd() {

View File

@ -80,6 +80,7 @@ public class BruteForceL1DescriptorMatcherTest extends OpenCVTestCase {
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_L1);
matSize = 100;
@ -90,7 +91,6 @@ public class BruteForceL1DescriptorMatcherTest extends OpenCVTestCase {
new DMatch(3, 1, 0, 2.6545324f),
new DMatch(4, 0, 0, 6.1294870f)
};
super.setUp();
}
public void testAdd() {

View File

@ -85,6 +85,7 @@ public class BruteForceSL2DescriptorMatcherTest extends OpenCVTestCase {
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_SL2);
matSize = 100;
@ -95,8 +96,6 @@ public class BruteForceSL2DescriptorMatcherTest extends OpenCVTestCase {
new DMatch(3, 1, 0, 0.23459719f),
new DMatch(4, 0, 0, 1.174339f)
};
super.setUp();
}
public void testAdd() {

View File

@ -35,12 +35,10 @@ public class FASTFeatureDetectorTest extends OpenCVTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
detector = FeatureDetector.create(FeatureDetector.FAST);
truth = new KeyPoint[] { new KeyPoint(32, 27, 7, -1, 254, 0, -1), new KeyPoint(27, 32, 7, -1, 254, 0, -1), new KeyPoint(73, 68, 7, -1, 254, 0, -1),
new KeyPoint(68, 73, 7, -1, 254, 0, -1) };
super.setUp();
}
public void testCreate() {

View File

@ -154,9 +154,9 @@ public class FlannBasedDescriptorMatcherTest extends OpenCVTestCase {
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 1.049694f),
new DMatch(1, 0, 0, 1.0984558f),
@ -164,8 +164,6 @@ public class FlannBasedDescriptorMatcherTest extends OpenCVTestCase {
new DMatch(3, 1, 0, 0.48435235f),
new DMatch(4, 0, 0, 1.0836693f)
};
super.setUp();
}
public void testAdd() {

View File

@ -31,10 +31,9 @@ public class ORBDescriptorExtractorTest extends OpenCVTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
matSize = 100;
super.setUp();
}
public void testComputeListOfMatListOfListOfKeyPointListOfMat() {
@ -101,7 +100,9 @@ public class ORBDescriptorExtractorTest extends OpenCVTestCase {
extractor.write(filename);
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.ORB</name>\n<WTA_K>2</WTA_K>\n<edgeThreshold>31</edgeThreshold>\n<firstLevel>0</firstLevel>\n<nFeatures>500</nFeatures>\n<nLevels>8</nLevels>\n<patchSize>31</patchSize>\n<scaleFactor>1.2000000476837158e+00</scaleFactor>\n<scoreType>0</scoreType>\n</opencv_storage>\n";
assertEquals(truth, readFile(filename));
String actual = readFile(filename);
actual = actual.replaceAll("e\\+000", "e+00"); // NOTE: workaround for different platforms double representation
assertEquals(truth, actual);
}
public void testWriteYml() {
@ -110,7 +111,9 @@ public class ORBDescriptorExtractorTest extends OpenCVTestCase {
extractor.write(filename);
String truth = "%YAML:1.0\nname: \"Feature2D.ORB\"\nWTA_K: 2\nedgeThreshold: 31\nfirstLevel: 0\nnFeatures: 500\nnLevels: 8\npatchSize: 31\nscaleFactor: 1.2000000476837158e+00\nscoreType: 0\n";
assertEquals(truth, readFile(filename));
String actual = readFile(filename);
actual = actual.replaceAll("e\\+000", "e+00"); // NOTE: workaround for different platforms double representation
assertEquals(truth, actual);
}
}

View File

@ -28,6 +28,7 @@ public class SIFTDescriptorExtractorTest extends OpenCVTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
extractor = DescriptorExtractor.create(DescriptorExtractor.SIFT);
keypoint = new KeyPoint(55.775577545166016f, 44.224422454833984f, 16, 9.754629f, 8617.863f, 1, -1);
matSize = 100;
@ -44,8 +45,6 @@ public class SIFTDescriptorExtractorTest extends OpenCVTestCase {
);
}
};
super.setUp();
}
public void testComputeListOfMatListOfListOfKeyPointListOfMat() {
@ -88,7 +87,9 @@ public class SIFTDescriptorExtractorTest extends OpenCVTestCase {
extractor.write(filename);
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.SIFT</name>\n<contrastThreshold>4.0000000000000001e-02</contrastThreshold>\n<edgeThreshold>10.</edgeThreshold>\n<nFeatures>0</nFeatures>\n<nOctaveLayers>3</nOctaveLayers>\n<sigma>1.6000000000000001e+00</sigma>\n</opencv_storage>\n";
assertEquals(truth, readFile(filename));
String actual = readFile(filename);
actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation
assertEquals(truth, actual);
}
public void testWriteYml() {
@ -97,7 +98,9 @@ public class SIFTDescriptorExtractorTest extends OpenCVTestCase {
extractor.write(filename);
String truth = "%YAML:1.0\nname: \"Feature2D.SIFT\"\ncontrastThreshold: 4.0000000000000001e-02\nedgeThreshold: 10.\nnFeatures: 0\nnOctaveLayers: 3\nsigma: 1.6000000000000001e+00\n";
assertEquals(truth, readFile(filename));
String actual = readFile(filename);
actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation
assertEquals(truth, actual);
}
}

View File

@ -42,22 +42,10 @@ public class STARFeatureDetectorTest extends OpenCVTestCase {
}
protected void setUp() throws Exception {
super.setUp();
detector = FeatureDetector.create(FeatureDetector.STAR);
matSize = 200;
truth = new KeyPoint[] {
/*
new KeyPoint(95, 80, 22, -1, 31.595734f, 0, -1),
new KeyPoint(105, 80, 22, -1, 31.595734f, 0, -1),
new KeyPoint(80, 95, 22, -1, 31.595734f, 0, -1),
new KeyPoint(120, 95, 22, -1, 31.595734f, 0, -1),
new KeyPoint(100, 100, 8, -1, -219.90825f, 0, -1),
new KeyPoint(80, 105, 22, -1, 31.595734f, 0, -1),
new KeyPoint(120, 105, 22, -1, 31.595734f, 0, -1),
new KeyPoint(95, 120, 22, -1, 31.595734f, 0, -1),
new KeyPoint(105, 120, 22, -1, 31.595734f, 0, -1)
*/
new KeyPoint( 95, 80, 22, -1, 31.5957f, 0, -1),
new KeyPoint(105, 80, 22, -1, 31.5957f, 0, -1),
new KeyPoint( 80, 95, 22, -1, 31.5957f, 0, -1),
@ -68,8 +56,6 @@ public class STARFeatureDetectorTest extends OpenCVTestCase {
new KeyPoint( 95, 120, 22, -1, 31.5957f, 0, -1),
new KeyPoint(105, 120, 22, -1, 31.5957f, 0, -1)
};
super.setUp();
}
public void testCreate() {

View File

@ -26,10 +26,9 @@ public class SURFDescriptorExtractorTest extends OpenCVTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
extractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
matSize = 100;
super.setUp();
}
public void testComputeListOfMatListOfListOfKeyPointListOfMat() {

View File

@ -52,18 +52,15 @@ public class SURFFeatureDetectorTest extends OpenCVTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
detector = FeatureDetector.create(FeatureDetector.SURF);
matSize = 100;
truth = new KeyPoint[] {
new KeyPoint(55.775578f, 55.775578f, 16, 80.245735f, 8617.8633f, 0, -1),
new KeyPoint(44.224422f, 55.775578f, 16, 170.24574f, 8617.8633f, 0, -1),
new KeyPoint(44.224422f, 44.224422f, 16, 260.24573f, 8617.8633f, 0, -1),
new KeyPoint(55.775578f, 44.224422f, 16, 350.24573f, 8617.8633f, 0, -1)
};
super.setUp();
}
public void testCreate() {

View File

@ -219,7 +219,7 @@ void Mat_to_vector_Mat(cv::Mat& mat, std::vector<cv::Mat>& v_mat)
for(int i=0; i<mat.rows; i++)
{
Vec<int, 2> a = mat.at< Vec<int, 2> >(i, 0);
long long addr = (((long long)a[0])<<32) | a[1];
long long addr = (((long long)a[0])<<32) | (a[1]&0xffffffff);
Mat& m = *( (Mat*) addr );
v_mat.push_back(m);
}
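The added ``& 0xffffffff`` mask prevents sign extension of the low 32-bit word when the 64-bit pointer value is rebuilt; a minimal standalone sketch of the effect (the values are illustrative only):

#include <cstdio>

int main()
{
    int hi = 1, lo = -2;                                               // lo is 0xFFFFFFFE as a 32-bit value
    long long bad  = (((long long)hi) << 32) | lo;                     // sign extension wipes the high word
    long long good = (((long long)hi) << 32) | (lo & 0xffffffffLL);    // mask keeps the high word intact
    std::printf("%llx %llx\n", bad, good);                             // prints: fffffffffffffffe 1fffffffe
    return 0;
}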

View File

@ -262,7 +262,7 @@ public class Converters {
int[] buff = new int[count * 2];
m.get(0, 0, buff);
for (int i = 0; i < count; i++) {
long addr = (((long) buff[i * 2]) << 32) | ((long) buff[i * 2 + 1]);
long addr = (((long) buff[i * 2]) << 32) | (((long) buff[i * 2 + 1]) & 0xffffffffL);
mats.add(new Mat(addr));
}
}

View File

@ -0,0 +1,80 @@
ocv_check_dependencies(opencv_java ${OPENCV_MODULE_opencv_java_OPT_DEPS} ${OPENCV_MODULE_opencv_java_REQ_DEPS})
if(NOT OCV_DEPENDENCIES_FOUND OR NOT ANT_EXECUTABLE)
return()
endif()
# TODO: This has the same name as the Android test project. That project should
# probably be renamed.
project(opencv_test_java)
set(opencv_test_java_bin_dir "${CMAKE_CURRENT_BINARY_DIR}/.build")
set(android_source_dir "${CMAKE_CURRENT_SOURCE_DIR}/../android_test")
set(java_source_dir ${CMAKE_CURRENT_SOURCE_DIR})
# get project sources
file(GLOB_RECURSE opencv_test_java_files RELATIVE "${android_source_dir}" "${android_source_dir}/res/*" "${android_source_dir}/src/*")
ocv_list_filterout(opencv_test_java_files ".svn")
ocv_list_filterout(opencv_test_java_files ".*#.*")
# These are the files that need to be updated for pure Java.
ocv_list_filterout(opencv_test_java_files ".*OpenCVTestCase.*")
ocv_list_filterout(opencv_test_java_files ".*OpenCVTestRunner.*")
# These files aren't for desktop Java.
ocv_list_filterout(opencv_test_java_files ".*android.*")
# These are files updated for pure Java.
file(GLOB_RECURSE modified_files RELATIVE "${java_source_dir}" "${java_source_dir}/src/*")
ocv_list_filterout(modified_files ".svn")
ocv_list_filterout(modified_files ".*#.*")
# These are extra jars needed to run the tests.
file(GLOB_RECURSE lib_files RELATIVE "${java_source_dir}" "${java_source_dir}/lib/*")
ocv_list_filterout(lib_files ".svn")
ocv_list_filterout(lib_files ".*#.*")
# copy sources out from the build tree
set(opencv_test_java_file_deps "")
foreach(f ${opencv_test_java_files})
add_custom_command(
OUTPUT "${opencv_test_java_bin_dir}/${f}"
COMMAND ${CMAKE_COMMAND} -E copy "${android_source_dir}/${f}" "${opencv_test_java_bin_dir}/${f}"
MAIN_DEPENDENCY "${android_source_dir}/${f}"
COMMENT "Copying ${f}")
list(APPEND opencv_test_java_file_deps "${android_source_dir}/${f}" "${opencv_test_java_bin_dir}/${f}")
endforeach()
# Overwrite select Android sources with Java-specific sources.
# Also, copy over the libs we'll need for testing.
foreach(f ${modified_files} ${lib_files})
add_custom_command(
OUTPUT "${opencv_test_java_bin_dir}/${f}"
COMMAND ${CMAKE_COMMAND} -E copy "${java_source_dir}/${f}" "${opencv_test_java_bin_dir}/${f}"
MAIN_DEPENDENCY "${java_source_dir}/${f}"
COMMENT "Copying ${f}")
list(APPEND opencv_test_java_file_deps "${java_source_dir}/${f}")
endforeach()
# Copy the OpenCV jar after it has been generated.
add_custom_command(
OUTPUT "${opencv_test_java_bin_dir}/bin/${JAR_NAME}"
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_BINARY_DIR}/bin/${JAR_NAME}" "${opencv_test_java_bin_dir}/bin/${JAR_NAME}"
COMMENT "Copying the OpenCV jar")
add_custom_target(copy_opencv_jar ALL SOURCES "${opencv_test_java_bin_dir}/bin/${JAR_NAME}")
# ${the_module} is the target for the Java jar.
add_dependencies(copy_opencv_jar ${the_module})
# Copy the ant build file.
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/build.xml" DESTINATION "${opencv_test_java_bin_dir}")
# Create a script for running the Java tests and place it in build/bin.
if(WIN32)
file(WRITE "${CMAKE_BINARY_DIR}/bin/opencv_test_java.cmd" "cd ${opencv_test_java_bin_dir}\nset PATH=${EXECUTABLE_OUTPUT_PATH}/Release;%PATH%\nant -DjavaLibraryPath=${EXECUTABLE_OUTPUT_PATH}/Release buildAndTest")
file(WRITE "${CMAKE_BINARY_DIR}/bin/opencv_test_java_D.cmd" "cd ${opencv_test_java_bin_dir}\nset PATH=${EXECUTABLE_OUTPUT_PATH}/Debug;%PATH%\nant -DjavaLibraryPath=${EXECUTABLE_OUTPUT_PATH}/Debug buildAndTest")
else()
file(WRITE "${CMAKE_BINARY_DIR}/bin/opencv_test_java.sh" "cd ${opencv_test_java_bin_dir};\nant -DjavaLibraryPath=${LIBRARY_OUTPUT_PATH} buildAndTest;\ncd -")
endif()
add_custom_target(${PROJECT_NAME} ALL SOURCES ${opencv_test_java_file_deps})
add_dependencies(opencv_tests ${PROJECT_NAME})

View File

@ -0,0 +1,58 @@
<project>
<path id="master-classpath">
<fileset dir="lib">
<include name="*.jar"/>
</fileset>
<fileset dir="bin">
<include name="*.jar"/>
</fileset>
</path>
<target name="clean">
<delete dir="build"/>
<delete dir="testResults"/>
</target>
<target name="compile">
<mkdir dir="build/classes"/>
<javac sourcepath="" srcdir="src" destdir="build/classes" >
<include name="**/*.java"/>
<classpath refid="master-classpath"/>
</javac>
</target>
<target name="jar">
<mkdir dir="build/jar"/>
<jar destfile="build/jar/opencv-test.jar" basedir="build/classes">
<manifest>
<attribute name="Main-Class" value="org.opencv.test.OpenCVTestRunner"/>
</manifest>
</jar>
</target>
<target name="test">
<mkdir dir="testResults"/>
<junit printsummary="false" haltonfailure="false" haltonerror="false" showoutput="false" logfailedtests="true" maxmemory="256m">
<sysproperty key="java.library.path" path="${javaLibraryPath}"/>
<classpath refid="master-classpath"/>
<classpath>
<pathelement location="build/classes"/>
</classpath>
<formatter type="xml"/>
<batchtest fork="yes" todir="testResults">
<zipfileset src="build/jar/opencv-test.jar" includes="**/*.class" excludes="**/OpenCVTest*">
<exclude name="**/*$*.class"/>
</zipfileset>
</batchtest>
</junit>
</target>
<target name="buildAndTest">
<antcall target="compile"/>
<antcall target="jar"/>
<antcall target="test"/>
</target>
</project>

Binary file not shown.

View File

@ -0,0 +1,492 @@
// TODO: This file is largely a duplicate of the one in android_test.
package org.opencv.test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.Charset;
import java.util.List;
import junit.framework.TestCase;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Point3;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.features2d.DMatch;
import org.opencv.features2d.KeyPoint;
import org.opencv.highgui.Highgui;
public class OpenCVTestCase extends TestCase {
//change to 'true' to unblock fail on fail("Not yet implemented")
public static final boolean passNYI = true;
protected static boolean isTestCaseEnabled = true;
protected static final int matSize = 10;
protected static final double EPS = 0.001;
protected static final double weakEPS = 0.5;
private static final String TAG = "OpenCVTestCase";
protected Mat dst;
protected Mat truth;
protected Scalar colorBlack;
protected Scalar colorWhite;
// Naming notation: <channels info>_[depth]_[dimensions]_value
// examples: gray0 - single channel 8U 2d Mat filled with 0
// grayRnd - single channel 8U 2d Mat filled with random numbers
// gray0_32f_1d
// TODO: OpenCVTestCase refactorings
// - rename matrices
// - create methods gray0() and create src1 explicitly
// - create some masks
// - use truth member everywhere - remove truth from base class - each test
// fixture should use its own truth field
protected Mat gray0;
protected Mat gray1;
protected Mat gray2;
protected Mat gray3;
protected Mat gray9;
protected Mat gray127;
protected Mat gray128;
protected Mat gray255;
protected Mat grayRnd;
protected Mat gray_16u_256;
protected Mat gray_16s_1024;
protected Mat gray0_32f;
protected Mat gray1_32f;
protected Mat gray3_32f;
protected Mat gray9_32f;
protected Mat gray255_32f;
protected Mat grayE_32f;
protected Mat grayRnd_32f;
protected Mat gray0_32f_1d;
protected Mat gray0_64f;
protected Mat gray0_64f_1d;
protected Mat rgba0;
protected Mat rgba128;
protected Mat rgbLena;
protected Mat grayChess;
protected Mat v1;
protected Mat v2;
@Override
protected void setUp() throws Exception {
super.setUp();
try {
System.loadLibrary("opencv_java");
} catch (SecurityException e) {
System.out.println(e.toString());
System.exit(-1);
} catch (UnsatisfiedLinkError e) {
System.out.println(e.toString());
System.exit(-1);
}
String pwd;
try {
pwd = new File(".").getCanonicalPath() + File.separator;
} catch (IOException e) {
System.out.println(e);
return;
}
OpenCVTestRunner.LENA_PATH = pwd + "res/drawable/lena.jpg";
OpenCVTestRunner.CHESS_PATH = pwd + "res/drawable/chessboard.jpg";
OpenCVTestRunner.LBPCASCADE_FRONTALFACE_PATH = pwd + "res/raw/lbpcascade_frontalface.xml";
assert(new File(OpenCVTestRunner.LENA_PATH).exists());
assert(new File(OpenCVTestRunner.CHESS_PATH).exists());
assert(new File(OpenCVTestRunner.LBPCASCADE_FRONTALFACE_PATH).exists());
dst = new Mat();
assertTrue(dst.empty());
truth = null;
colorBlack = new Scalar(0);
colorWhite = new Scalar(255, 255, 255);
gray0 = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(0));
gray1 = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(1));
gray2 = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(2));
gray3 = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(3));
gray9 = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(9));
gray127 = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(127));
gray128 = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(128));
gray255 = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
grayRnd = new Mat(matSize, matSize, CvType.CV_8U);
Core.randu(grayRnd, 0, 256);
gray_16u_256 = new Mat(matSize, matSize, CvType.CV_16U, new Scalar(256));
gray_16s_1024 = new Mat(matSize, matSize, CvType.CV_16S, new Scalar(1024));
gray0_32f = new Mat(matSize, matSize, CvType.CV_32F, new Scalar(0.0));
gray1_32f = new Mat(matSize, matSize, CvType.CV_32F, new Scalar(1.0));
gray3_32f = new Mat(matSize, matSize, CvType.CV_32F, new Scalar(3.0));
gray9_32f = new Mat(matSize, matSize, CvType.CV_32F, new Scalar(9.0));
gray255_32f = new Mat(matSize, matSize, CvType.CV_32F, new Scalar(255.0));
grayE_32f = Mat.eye(matSize, matSize, CvType.CV_32FC1);
grayRnd_32f = new Mat(matSize, matSize, CvType.CV_32F);
Core.randu(grayRnd_32f, 0, 256);
gray0_64f = new Mat(matSize, matSize, CvType.CV_64F, new Scalar(0.0));
gray0_32f_1d = new Mat(1, matSize, CvType.CV_32F, new Scalar(0.0));
gray0_64f_1d = new Mat(1, matSize, CvType.CV_64F, new Scalar(0.0));
rgba0 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(0));
rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128));
rgbLena = Highgui.imread(OpenCVTestRunner.LENA_PATH);
grayChess = Highgui.imread(OpenCVTestRunner.CHESS_PATH, 0);
v1 = new Mat(1, 3, CvType.CV_32F);
v1.put(0, 0, 1.0, 3.0, 2.0);
v2 = new Mat(1, 3, CvType.CV_32F);
v2.put(0, 0, 2.0, 1.0, 3.0);
}
@Override
protected void tearDown() throws Exception {
gray0.release();
gray1.release();
gray2.release();
gray3.release();
gray9.release();
gray127.release();
gray128.release();
gray255.release();
gray_16u_256.release();
gray_16s_1024.release();
grayRnd.release();
gray0_32f.release();
gray1_32f.release();
gray3_32f.release();
gray9_32f.release();
gray255_32f.release();
grayE_32f.release();
grayRnd_32f.release();
gray0_32f_1d.release();
gray0_64f.release();
gray0_64f_1d.release();
rgba0.release();
rgba128.release();
rgbLena.release();
grayChess.release();
v1.release();
v2.release();
super.tearDown();
}
@Override
protected void runTest() throws Throwable {
// Do nothing if the precondition does not hold.
if (isTestCaseEnabled) {
super.runTest();
} else {
OpenCVTestRunner.Log(TAG + " :: " + "Test case \"" + this.getClass().getName() + "\" disabled!");
}
}
protected Mat getMat(int type, double... vals)
{
return new Mat(matSize, matSize, type, new Scalar(vals));
}
protected Mat makeMask(Mat m, double... vals)
{
m.submat(0, m.rows(), 0, m.cols() / 2).setTo(new Scalar(vals));
return m;
}
public static void fail(String msg) {
if(msg == "Not yet implemented" && passNYI)
return;
TestCase.fail(msg);
}
public static <E extends Number> void assertListEquals(List<E> list1, List<E> list2) {
if (list1.size() != list2.size()) {
throw new UnsupportedOperationException();
}
if (!list1.isEmpty())
{
if (list1.get(0) instanceof Float || list1.get(0) instanceof Double)
throw new UnsupportedOperationException();
}
for (int i = 0; i < list1.size(); i++)
assertEquals(list1.get(i), list2.get(i));
}
public static <E extends Number> void assertListEquals(List<E> list1, List<E> list2, double epsilon) {
if (list1.size() != list2.size()) {
throw new UnsupportedOperationException();
}
for (int i = 0; i < list1.size(); i++)
assertTrue(Math.abs(list1.get(i).doubleValue() - list2.get(i).doubleValue()) <= epsilon);
}
public static <E extends Number> void assertArrayEquals(E[] ar1, E[] ar2, double epsilon) {
if (ar1.length != ar2.length) {
fail("Arrays have different sizes.");
}
for (int i = 0; i < ar1.length; i++)
assertEquals(ar1[i].doubleValue(), ar2[i].doubleValue(), epsilon);
//assertTrue(Math.abs(ar1[i].doubleValue() - ar2[i].doubleValue()) <= epsilon);
}
public static void assertArrayEquals(double[] ar1, double[] ar2, double epsilon) {
if (ar1.length != ar2.length) {
fail("Arrays have different sizes.");
}
for (int i = 0; i < ar1.length; i++)
assertEquals(ar1[i], ar2[i], epsilon);
//assertTrue(Math.abs(ar1[i].doubleValue() - ar2[i].doubleValue()) <= epsilon);
}
public static void assertListMatEquals(List<Mat> list1, List<Mat> list2, double epsilon) {
if (list1.size() != list2.size()) {
throw new UnsupportedOperationException();
}
for (int i = 0; i < list1.size(); i++)
assertMatEqual(list1.get(i), list2.get(i), epsilon);
}
public static void assertListPointEquals(List<Point> list1, List<Point> list2, double epsilon) {
if (list1.size() != list2.size()) {
throw new UnsupportedOperationException();
}
for (int i = 0; i < list1.size(); i++)
assertPointEquals(list1.get(i), list2.get(i), epsilon);
}
public static void assertArrayPointsEquals(Point[] vp1, Point[] vp2, double epsilon) {
if (vp1.length != vp2.length) {
fail("Arrays have different sizes.");
}
for (int i = 0; i < vp1.length; i++)
assertPointEquals(vp1[i], vp2[i], epsilon);
}
public static void assertListPoint3Equals(List<Point3> list1, List<Point3> list2, double epsilon) {
if (list1.size() != list2.size()) {
throw new UnsupportedOperationException();
}
for (int i = 0; i < list1.size(); i++)
assertPoint3Equals(list1.get(i), list2.get(i), epsilon);
}
public static void assertListRectEquals(List<Rect> list1, List<Rect> list2) {
if (list1.size() != list2.size()) {
throw new UnsupportedOperationException();
}
for (int i = 0; i < list1.size(); i++)
assertRectEquals(list1.get(i), list2.get(i));
}
public static void assertRectEquals(Rect expected, Rect actual) {
String msg = "expected:<" + expected + "> but was:<" + actual + ">";
assertEquals(msg, expected.x, actual.x);
assertEquals(msg, expected.y, actual.y);
assertEquals(msg, expected.width, actual.width);
assertEquals(msg, expected.height, actual.height);
}
public static void assertMatEqual(Mat m1, Mat m2) {
compareMats(m1, m2, true);
}
public static void assertMatNotEqual(Mat m1, Mat m2) {
compareMats(m1, m2, false);
}
public static void assertMatEqual(Mat expected, Mat actual, double eps) {
compareMats(expected, actual, eps, true);
}
public static void assertMatNotEqual(Mat expected, Mat actual, double eps) {
compareMats(expected, actual, eps, false);
}
public static void assertKeyPointEqual(KeyPoint expected, KeyPoint actual, double eps) {
String msg = "expected:<" + expected + "> but was:<" + actual + ">";
assertTrue(msg, Math.hypot(expected.pt.x - actual.pt.x, expected.pt.y - actual.pt.y) < eps);
assertTrue(msg, Math.abs(expected.size - actual.size) < eps);
assertTrue(msg, Math.abs(expected.angle - actual.angle) < eps);
assertTrue(msg, Math.abs(expected.response - actual.response) < eps);
assertEquals(msg, expected.octave, actual.octave);
assertEquals(msg, expected.class_id, actual.class_id);
}
public static void assertListKeyPointEquals(List<KeyPoint> expected, List<KeyPoint> actual, double epsilon) {
assertEquals(expected.size(), actual.size());
for (int i = 0; i < expected.size(); i++)
assertKeyPointEqual(expected.get(i), actual.get(i), epsilon);
}
public static void assertDMatchEqual(DMatch expected, DMatch actual, double eps) {
String msg = "expected:<" + expected + "> but was:<" + actual + ">";
assertEquals(msg, expected.queryIdx, actual.queryIdx);
assertEquals(msg, expected.trainIdx, actual.trainIdx);
assertEquals(msg, expected.imgIdx, actual.imgIdx);
assertTrue(msg, Math.abs(expected.distance - actual.distance) < eps);
}
public static void assertScalarEqual(Scalar expected, Scalar actual, double eps) {
String msg = "expected:<" + expected + "> but was:<" + actual + ">";
assertTrue(msg, Math.abs(expected.val[0] - actual.val[0]) < eps);
assertTrue(msg, Math.abs(expected.val[1] - actual.val[1]) < eps);
assertTrue(msg, Math.abs(expected.val[2] - actual.val[2]) < eps);
assertTrue(msg, Math.abs(expected.val[3] - actual.val[3]) < eps);
}
public static void assertArrayDMatchEquals(DMatch[] expected, DMatch[] actual, double epsilon) {
assertEquals(expected.length, actual.length);
for (int i = 0; i < expected.length; i++)
assertDMatchEqual(expected[i], actual[i], epsilon);
}
public static void assertListDMatchEquals(List<DMatch> expected, List<DMatch> actual, double epsilon) {
DMatch expectedArray[] = expected.toArray(new DMatch[0]);
DMatch actualArray[] = actual.toArray(new DMatch[0]);
assertArrayDMatchEquals(expectedArray, actualArray, epsilon);
}
public static void assertPointEquals(Point expected, Point actual, double eps) {
String msg = "expected:<" + expected + "> but was:<" + actual + ">";
assertEquals(msg, expected.x, actual.x, eps);
assertEquals(msg, expected.y, actual.y, eps);
}
public static void assertSizeEquals(Size expected, Size actual, double eps) {
String msg = "expected:<" + expected + "> but was:<" + actual + ">";
assertEquals(msg, expected.width, actual.width, eps);
assertEquals(msg, expected.height, actual.height, eps);
}
public static void assertPoint3Equals(Point3 expected, Point3 actual, double eps) {
String msg = "expected:<" + expected + "> but was:<" + actual + ">";
assertEquals(msg, expected.x, actual.x, eps);
assertEquals(msg, expected.y, actual.y, eps);
assertEquals(msg, expected.z, actual.z, eps);
}
static private void compareMats(Mat expected, Mat actual, boolean isEqualityMeasured) {
if (expected.type() != actual.type() || expected.cols() != actual.cols() || expected.rows() != actual.rows()) {
throw new UnsupportedOperationException("Can not compare " + expected + " and " + actual);
}
if (expected.depth() == CvType.CV_32F || expected.depth() == CvType.CV_64F) {
if (isEqualityMeasured)
throw new UnsupportedOperationException(
"Floating-point Mats must not be checked for exact match. Use assertMatEqual(Mat expected, Mat actual, double eps) instead.");
else
throw new UnsupportedOperationException(
"Floating-point Mats must not be checked for exact match. Use assertMatNotEqual(Mat expected, Mat actual, double eps) instead.");
}
Mat diff = new Mat();
Core.absdiff(expected, actual, diff);
Mat reshaped = diff.reshape(1);
int mistakes = Core.countNonZero(reshaped);
reshaped.release();
diff.release();
if (isEqualityMeasured)
assertTrue("Mats are different in " + mistakes + " points", 0 == mistakes);
else
assertFalse("Mats are equal", 0 == mistakes);
}
static private void compareMats(Mat expected, Mat actual, double eps, boolean isEqualityMeasured) {
if (expected.type() != actual.type() || expected.cols() != actual.cols() || expected.rows() != actual.rows()) {
throw new UnsupportedOperationException("Can not compare " + expected + " and " + actual);
}
Mat diff = new Mat();
Core.absdiff(expected, actual, diff);
double maxDiff = Core.norm(diff, Core.NORM_INF);
if (isEqualityMeasured)
assertTrue("Max difference between expected and actiual Mats is "+ maxDiff + ", that bigger than " + eps,
Core.checkRange(diff, true, 0.0, eps));
else
assertFalse("Max difference between expected and actiual Mats is "+ maxDiff + ", that less than " + eps,
Core.checkRange(diff, true, 0.0, eps));
}
protected static String readFile(String path) {
BufferedReader br = null;
try {
br = new BufferedReader(new FileReader(path));
String line;
StringBuffer result = new StringBuffer();
while ((line = br.readLine()) != null) {
result.append(line);
result.append("\n");
}
return result.toString();
} catch (IOException e) {
OpenCVTestRunner.Log("Failed to read file \"" + path
+ "\". Exception is thrown: " + e);
return null;
} finally {
if (br != null)
try {
br.close();
} catch (IOException e) {
OpenCVTestRunner.Log("Exception is thrown: " + e);
}
}
}
protected static void writeFile(String path, String content) {
FileOutputStream stream = null;
try {
stream = new FileOutputStream(new File(path));
FileChannel fc = stream.getChannel();
fc.write(Charset.defaultCharset().encode(content));
} catch (IOException e) {
OpenCVTestRunner.Log("Failed to write file \"" + path
+ "\". Exception is thrown: " + e);
} finally {
if (stream != null)
try {
stream.close();
} catch (IOException e) {
OpenCVTestRunner.Log("Exception is thrown: " + e);
}
}
}
}

View File

@ -0,0 +1,44 @@
package org.opencv.test;
import java.io.File;
import java.io.IOException;
import junit.framework.Assert;
import org.opencv.core.Mat;
public class OpenCVTestRunner {
public static String LENA_PATH = "";
public static String CHESS_PATH = "";
public static String LBPCASCADE_FRONTALFACE_PATH = "";
private static String TAG = "opencv_test_java";
public static String getTempFileName(String extension)
{
if (!extension.startsWith("."))
extension = "." + extension;
try {
File tmp = File.createTempFile("OpenCV", extension);
String path = tmp.getAbsolutePath();
tmp.delete();
return path;
} catch (IOException e) {
Log("Failed to get temp file name. Exception is thrown: " + e);
}
return null;
}
static public void Log(String message) {
System.out.println(TAG + " :: " + message);
}
static public void Log(Mat m) {
System.out.println(TAG + " :: " + m + "\n " + m.dump());
}
public static String getOutputFileName(String name)
{
return getTempFileName(name);
}
}

View File

@ -3,6 +3,171 @@ Common Interfaces of Generic Descriptor Matchers
.. highlight:: cpp
OneWayDescriptorBase
--------------------
.. ocv:class:: OneWayDescriptorBase
The class encapsulates functionality for training or loading a set of one-way descriptors
and finding the descriptor closest to an input feature. ::
class CV_EXPORTS OneWayDescriptorBase
{
public:
// creates an instance of OneWayDescriptor from a set of training files
// - patch_size: size of the input (large) patch
// - pose_count: the number of poses to generate for each descriptor
// - train_path: path to training files
// - pca_config: the name of the file that contains PCA for small patches (2 times smaller
// than patch_size in each dimension)
// - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size)
// - pca_desc_config: the name of the file that contains descriptors of PCA components
OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path = 0, const char* pca_config = 0,
const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1,
int pca_dim_high = 100, int pca_dim_low = 100);
OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename, const string &train_path = string(), const string &images_list = string(),
float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1,
int pca_dim_high = 100, int pca_dim_low = 100);
virtual ~OneWayDescriptorBase();
void clear ();
// Allocate: allocates memory for a given number of descriptors
void Allocate(int train_feature_count);
// AllocatePCADescriptors: allocates memory for pca descriptors
void AllocatePCADescriptors();
// returns patch size
CvSize GetPatchSize() const {return m_patch_size;};
// returns the number of poses for each descriptor
int GetPoseCount() const {return m_pose_count;};
// returns the number of pyramid levels
int GetPyrLevels() const {return m_pyr_levels;};
// returns the number of descriptors
int GetDescriptorCount() const {return m_train_feature_count;};
// CreateDescriptorsFromImage: creates descriptors for each of the input features
// - src: input image
// - features: input features
// - pyr_levels: the number of pyramid levels
void CreateDescriptorsFromImage(IplImage* src, const vector<KeyPoint>& features);
// CreatePCADescriptors: generates descriptors for PCA components, needed for fast generation of feature descriptors
void CreatePCADescriptors();
// returns a feature descriptor by feature index
const OneWayDescriptor* GetDescriptor(int desc_idx) const {return &m_descriptors[desc_idx];};
// FindDescriptor: finds the closest descriptor
// - patch: input image patch
// - desc_idx: output index of the closest descriptor to the input patch
// - pose_idx: output index of the closest pose of the closest descriptor to the input patch
// - distance: distance from the input patch to the closest feature pose
// - _scales: scales of the input patch for each descriptor
// - scale_ranges: input scales variation (float[2])
void FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale = 0, float* scale_ranges = 0) const;
// - patch: input image patch
// - n: number of the closest indexes
// - desc_idxs: output indexes of the closest descriptor to the input patch (n)
// - pose_idx: output indexes of the closest pose of the closest descriptor to the input patch (n)
// - distances: distance from the input patch to the closest feature pose (n)
// - _scales: scales of the input patch
// - scale_ranges: input scales variation (float[2])
void FindDescriptor(IplImage* patch, int n, vector<int>& desc_idxs, vector<int>& pose_idxs,
vector<float>& distances, vector<float>& _scales, float* scale_ranges = 0) const;
// FindDescriptor: finds the closest descriptor
// - src: input image
// - pt: center of the feature
// - desc_idx: output index of the closest descriptor to the input patch
// - pose_idx: output index of the closest pose of the closest descriptor to the input patch
// - distance: distance from the input patch to the closest feature pose
void FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const;
// InitializePoses: generates random poses
void InitializePoses();
// InitializeTransformsFromPoses: generates 2x3 affine matrices from poses (initializes m_transforms)
void InitializeTransformsFromPoses();
// InitializePoseTransforms: subsequently calls InitializePoses and InitializeTransformsFromPoses
void InitializePoseTransforms();
// InitializeDescriptor: initializes a descriptor
// - desc_idx: descriptor index
// - train_image: image patch (ROI is supported)
// - feature_label: feature textual label
void InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label);
void InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label);
// InitializeDescriptors: load features from an image and create descriptors for each of them
void InitializeDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
const char* feature_label = "", int desc_start_idx = 0);
// Write: writes this object to a file storage
// - fs: output filestorage
void Write (FileStorage &fs) const;
// Read: reads OneWayDescriptorBase object from a file node
// - fn: input file node
void Read (const FileNode &fn);
// LoadPCADescriptors: loads PCA descriptors from a file
// - filename: input filename
int LoadPCADescriptors(const char* filename);
// LoadPCADescriptors: loads PCA descriptors from a file node
// - fn: input file node
int LoadPCADescriptors(const FileNode &fn);
// SavePCADescriptors: saves PCA descriptors to a file
// - filename: output filename
void SavePCADescriptors(const char* filename);
// SavePCADescriptors: saves PCA descriptors to a file storage
// - fs: output file storage
void SavePCADescriptors(CvFileStorage* fs) const;
// GeneratePCA: calculate and save PCA components and descriptors
// - img_path: path to training PCA images directory
// - images_list: filename with filenames of training PCA images
void GeneratePCA(const char* img_path, const char* images_list, int pose_count=500);
// SetPCAHigh: sets the high resolution pca matrices (copied to internal structures)
void SetPCAHigh(CvMat* avg, CvMat* eigenvectors);
// SetPCALow: sets the low resolution pca matrices (copied to internal structures)
void SetPCALow(CvMat* avg, CvMat* eigenvectors);
int GetLowPCA(CvMat** avg, CvMat** eigenvectors)
{
*avg = m_pca_avg;
*eigenvectors = m_pca_eigenvectors;
return m_pca_dim_low;
};
int GetPCADimLow() const {return m_pca_dim_low;};
int GetPCADimHigh() const {return m_pca_dim_high;};
void ConvertDescriptorsArrayToTree(); // Converting pca_descriptors array to KD tree
// GetPCAFilename: get default PCA filename
static string GetPCAFilename () { return "pca.yml"; }
virtual bool empty() const { return m_train_feature_count <= 0 ? true : false; }
protected:
...
};
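A hypothetical usage sketch assembled only from the declarations above follows; the header path, input file, keypoint detector, and the availability of a precomputed PCA file are all assumptions, not something this listing guarantees. ::

    #include <vector>
    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/legacy/legacy.hpp"   // assumed location of OneWayDescriptorBase

    using namespace cv;

    int main()
    {
        // Placeholder file name; a precomputed "pca.yml" is assumed to be present.
        IplImage* train = cvLoadImage("train.png", CV_LOAD_IMAGE_GRAYSCALE);
        if (!train) return -1;

        std::vector<KeyPoint> keypoints;
        FAST(Mat(train), keypoints, 20);
        if (keypoints.empty()) return -1;

        OneWayDescriptorBase base(cvSize(24, 24), 50, OneWayDescriptorBase::GetPCAFilename());
        base.Allocate((int)keypoints.size());
        base.InitializeDescriptors(train, keypoints);

        // Find the descriptor (and pose) closest to the first keypoint of the same image.
        int descIdx = -1, poseIdx = -1;
        float distance = 0.f;
        base.FindDescriptor(train, keypoints[0].pt, descIdx, poseIdx, distance);

        cvReleaseImage(&train);
        return 0;
    }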
OneWayDescriptorMatcher
-----------------------
.. ocv:class:: OneWayDescriptorMatcher : public GenericDescriptorMatcher
@ -59,8 +224,89 @@ Wrapping class for computing, matching, and classifying descriptors using the
...
};
FernClassifier
--------------
.. ocv:class:: FernClassifier
::
class CV_EXPORTS FernClassifier
{
public:
FernClassifier();
FernClassifier(const FileNode& node);
FernClassifier(const vector<vector<Point2f> >& points,
const vector<Mat>& refimgs,
const vector<vector<int> >& labels=vector<vector<int> >(),
int _nclasses=0, int _patchSize=PATCH_SIZE,
int _signatureSize=DEFAULT_SIGNATURE_SIZE,
int _nstructs=DEFAULT_STRUCTS,
int _structSize=DEFAULT_STRUCT_SIZE,
int _nviews=DEFAULT_VIEWS,
int _compressionMethod=COMPRESSION_NONE,
const PatchGenerator& patchGenerator=PatchGenerator());
virtual ~FernClassifier();
virtual void read(const FileNode& n);
virtual void write(FileStorage& fs, const String& name=String()) const;
virtual void trainFromSingleView(const Mat& image,
const vector<KeyPoint>& keypoints,
int _patchSize=PATCH_SIZE,
int _signatureSize=DEFAULT_SIGNATURE_SIZE,
int _nstructs=DEFAULT_STRUCTS,
int _structSize=DEFAULT_STRUCT_SIZE,
int _nviews=DEFAULT_VIEWS,
int _compressionMethod=COMPRESSION_NONE,
const PatchGenerator& patchGenerator=PatchGenerator());
virtual void train(const vector<vector<Point2f> >& points,
const vector<Mat>& refimgs,
const vector<vector<int> >& labels=vector<vector<int> >(),
int _nclasses=0, int _patchSize=PATCH_SIZE,
int _signatureSize=DEFAULT_SIGNATURE_SIZE,
int _nstructs=DEFAULT_STRUCTS,
int _structSize=DEFAULT_STRUCT_SIZE,
int _nviews=DEFAULT_VIEWS,
int _compressionMethod=COMPRESSION_NONE,
const PatchGenerator& patchGenerator=PatchGenerator());
virtual int operator()(const Mat& img, Point2f kpt, vector<float>& signature) const;
virtual int operator()(const Mat& patch, vector<float>& signature) const;
virtual void clear();
virtual bool empty() const;
void setVerbose(bool verbose);
int getClassCount() const;
int getStructCount() const;
int getStructSize() const;
int getSignatureSize() const;
int getCompressionMethod() const;
Size getPatchSize() const;
struct Feature
{
uchar x1, y1, x2, y2;
Feature() : x1(0), y1(0), x2(0), y2(0) {}
Feature(int _x1, int _y1, int _x2, int _y2)
: x1((uchar)_x1), y1((uchar)_y1), x2((uchar)_x2), y2((uchar)_y2)
{}
template<typename _Tp> bool operator ()(const Mat_<_Tp>& patch) const
{ return patch(y1,x1) > patch(y2, x2); }
};
enum
{
PATCH_SIZE = 31,
DEFAULT_STRUCTS = 50,
DEFAULT_STRUCT_SIZE = 9,
DEFAULT_VIEWS = 5000,
DEFAULT_SIGNATURE_SIZE = 176,
COMPRESSION_NONE = 0,
COMPRESSION_RANDOM_PROJ = 1,
COMPRESSION_PCA = 2,
DEFAULT_COMPRESSION_METHOD = COMPRESSION_NONE
};
protected:
...
};
FernDescriptorMatcher
---------------------

View File

@ -124,7 +124,7 @@ CvBoostTree
-----------
.. ocv:class:: CvBoostTree : public CvDTree
The weak tree classifier, a component of the boosted tree classifier :ocv:class:`CvBoost`, is a derivative of :ocv:class:`CvDTree`. Normally, there is no need to use the weak classifiers directly. However, they can be accessed as elements of the sequence :ocv:member:`CvBoost::weak`, retrieved by :ocv:func:`CvBoost::get_weak_predictors`.
The weak tree classifier, a component of the boosted tree classifier :ocv:class:`CvBoost`, is a derivative of :ocv:class:`CvDTree`. Normally, there is no need to use the weak classifiers directly. However, they can be accessed as elements of the sequence ``CvBoost::weak``, retrieved by :ocv:func:`CvBoost::get_weak_predictors`.
.. note:: In case of LogitBoost and Gentle AdaBoost, each weak predictor is a regression tree, rather than a classification tree. Even in case of Discrete AdaBoost and Real AdaBoost, the ``CvBoostTree::predict`` return value (:ocv:member:`CvDTreeNode::value`) is not an output class label. A negative value "votes" for class #0, a positive value - for class #1. The votes are weighted. The weight of each individual tree may be increased or decreased using the method ``CvBoostTree::scale``.
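For illustration, a minimal sketch of training a booster on synthetic data and then walking its weak predictor sequence might look as follows; the data, parameters, and variable names are assumptions, not part of this change. ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/ml/ml.hpp"

    int main()
    {
        // Synthetic training data: 100 samples, 2 features, binary class label in 'responses'.
        cv::Mat trainData(100, 2, CV_32F), responses(100, 1, CV_32F);
        cv::randu(trainData, 0, 1);
        for (int i = 0; i < trainData.rows; i++)
            responses.at<float>(i) = trainData.at<float>(i, 0) > 0.5f ? 1.f : 0.f;

        // Mark the response as categorical so that CvBoost treats this as classification.
        cv::Mat varType(trainData.cols + 1, 1, CV_8U, cv::Scalar(CV_VAR_ORDERED));
        varType.at<uchar>(trainData.cols) = CV_VAR_CATEGORICAL;

        CvBoost boost;
        boost.train(trainData, CV_ROW_SAMPLE, responses, cv::Mat(), cv::Mat(), varType);

        // Walk the sequence of weak predictors returned by get_weak_predictors().
        CvSeq* weak = boost.get_weak_predictors();
        CvSeqReader reader;
        cvStartReadSeq(weak, &reader);
        for (int i = 0; i < weak->total; i++)
        {
            CvBoostTree* tree;
            CV_READ_SEQ_ELEM(tree, reader);
            // each 'tree' is a weak regression tree, as described in the note above
        }
        return 0;
    }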
@ -233,4 +233,3 @@ CvBoost::get_data
Returns used train data of the boosted tree classifier.
.. ocv:function:: const CvDTreeTrainData* CvBoost::get_data() const

View File

@ -156,7 +156,7 @@ CvStatModel::predict
--------------------
Predicts the response for a sample.
.. ocv:function:: float CvStatModel::predict( const Mat& sample, <prediction_params> ) const
.. ocv:function:: float CvStatModel::predict( const Mat& sample, ... ) const
The method is used to predict the response for a new sample. In case of a classification, the method returns the class label. In case of a regression, the method returns the output function value. The input sample must have as many components as the ``train_data`` passed to ``train`` contains. If the ``var_idx`` parameter is passed to ``train``, it is remembered and then is used to extract only the necessary components from the input sample in the method ``predict``.
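A hedged illustration of this pattern with one concrete model follows; ``CvSVM`` is used here only as an example subclass, and the synthetic data and parameter choices are assumptions. ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/ml/ml.hpp"

    int main()
    {
        // Synthetic two-class training set; any CvStatModel subclass follows the same pattern.
        cv::Mat trainData(100, 2, CV_32FC1), responses(100, 1, CV_32FC1);
        cv::randu(trainData, 0, 1);
        for (int i = 0; i < trainData.rows; i++)
            responses.at<float>(i) = trainData.at<float>(i, 0) > 0.5f ? 1.f : 0.f;

        CvSVM svm;                        // example model; substitute the model actually in use
        svm.train(trainData, responses);

        cv::Mat sample = (cv::Mat_<float>(1, 2) << 0.9f, 0.2f);
        float label = svm.predict(sample);   // class label here; function value for regression models
        (void)label;
        return 0;
    }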

View File

@ -148,7 +148,7 @@ Brute-force descriptor matcher. For each descriptor in the first set, this match
The class ``BruteForceMatcher_OCL_base`` has an interface similar to the class :ocv:class:`DescriptorMatcher`. It has two groups of ``match`` methods: for matching descriptors of one image with another image or with an image set. Also, all functions have an alternative to save results either to the GPU memory or to the CPU memory. ``BruteForceMatcher_OCL_base`` supports only the ``L1<float>``, ``L2<float>``, and ``Hamming`` distance types.
.. seealso:: :ocv:class:`DescriptorMatcher`, :ocv:class:`BruteForceMatcher`
.. seealso:: :ocv:class:`DescriptorMatcher`, :ocv:class:`BFMatcher`
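A sketch of the one-image matching path might look like the following; the ``L2Dist`` constant is assumed from the class interface (corresponding to the ``L2<float>`` distance mentioned above), and the descriptors are synthetic. ::

    #include <vector>
    #include "opencv2/core/core.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/ocl/ocl.hpp"

    int main()
    {
        // Synthetic CV_32F descriptors, one row per descriptor.
        cv::Mat queryDesc(64, 32, CV_32F), trainDesc(128, 32, CV_32F);
        cv::randu(queryDesc, 0, 1);
        cv::randu(trainDesc, 0, 1);

        // Upload to the device; match() returns its results in CPU memory as std::vector<DMatch>.
        cv::ocl::oclMat queryOcl(queryDesc), trainOcl(trainDesc);

        cv::ocl::BruteForceMatcher_OCL_base matcher(cv::ocl::BruteForceMatcher_OCL_base::L2Dist);
        std::vector<cv::DMatch> matches;
        matcher.match(queryOcl, trainOcl, matches);
        return 0;
    }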

View File

@ -56,7 +56,7 @@ Class providing memory buffers for :ocv:func:`ocl::matchTemplate` function, plus
You can use the `user_block_size` field to set a specific block size for the :ocv:func:`ocl::matchTemplate` function. If you leave it at its default value `Size(0,0)`, the block size is estimated automatically (optimized for speed). By varying `user_block_size` you can reduce memory requirements at the cost of speed.
ocl::matchTemplate
----------------------
------------------
Computes a proximity map for a raster template and an image where the template is searched for.
.. ocv:function:: void ocl::matchTemplate(const oclMat& image, const oclMat& templ, oclMat& result, int method)
@ -71,7 +71,7 @@ Computes a proximity map for a raster template and an image where the template i
:param method: Specifies the way to compare the template with the image.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:class:`ocl::MatchTemplateBuf`.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`ocl::MatchTemplateBuf`.
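For example, calls combining this buffer with :ocv:func:`ocl::matchTemplate` might look like the sketch below; the inputs are synthetic and the block-size value is an arbitrary assumption. ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/ocl/ocl.hpp"

    int main()
    {
        // Synthetic 8-bit inputs; with real data, 'image' and 'templ' would be loaded images.
        cv::Mat image(256, 256, CV_8UC1), templ(16, 16, CV_8UC1);
        cv::randu(image, 0, 255);
        cv::randu(templ, 0, 255);

        cv::ocl::oclMat imageOcl(image), templOcl(templ), resultOcl;
        cv::ocl::matchTemplate(imageOcl, templOcl, resultOcl, CV_TM_CCORR);

        // Optional reuse of buffers and block-size tuning, per the MatchTemplateBuf description above.
        cv::ocl::MatchTemplateBuf buf;
        buf.user_block_size = cv::Size(32, 32);
        cv::ocl::matchTemplate(imageOcl, templOcl, resultOcl, CV_TM_CCORR, buf);

        cv::Mat result;
        resultOcl.download(result);   // bring the proximity map back to CPU memory if needed
        return 0;
    }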
The following methods are currently supported for ``CV_8U`` depth images:

View File

@ -221,6 +221,7 @@ Implementation of the camera parameters refinement algorithm which minimizes sum
detail::WaveCorrectKind
-----------------------
.. ocv:class:: detail::WaveCorrectKind
Wave correction kind. ::

View File

@ -287,7 +287,7 @@ The function finds all of the motion segments and marks them in ``segmask`` with
CamShift
------------
--------
Finds an object center, size, and orientation.
.. ocv:function:: RotatedRect CamShift( InputArray probImage, Rect& window, TermCriteria criteria )
@ -316,7 +316,7 @@ See the OpenCV sample ``camshiftdemo.c`` that tracks colored objects.
meanShift
-------------
---------
Finds an object on a back projection image.
.. ocv:function:: int meanShift( InputArray probImage, Rect& window, TermCriteria criteria )
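Below is a minimal sketch combining the two calls documented above; a synthetic image stands in for a real histogram back projection, which is what these functions normally receive. ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/video/tracking.hpp"

    int main()
    {
        // A stand-in for a real back projection: a bright blob on a dark background.
        cv::Mat backProj(240, 320, CV_8UC1, cv::Scalar(0));
        cv::rectangle(backProj, cv::Rect(100, 80, 40, 40), cv::Scalar(255), -1);

        cv::Rect window(90, 70, 60, 60);
        cv::TermCriteria criteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1.0);

        int iterations = cv::meanShift(backProj, window, criteria);     // shifts 'window' onto the blob
        cv::RotatedRect box = cv::CamShift(backProj, window, criteria); // also estimates size and angle
        (void)iterations; (void)box;
        return 0;
    }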