2012-10-17 15:12:04 +08:00
|
|
|
/*M///////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
|
|
|
//
|
|
|
|
// By downloading, copying, installing or using the software you agree to this license.
|
|
|
|
// If you do not agree to this license, do not download, install,
|
|
|
|
// copy or use the software.
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// License Agreement
|
|
|
|
// For Open Source Computer Vision Library
|
|
|
|
//
|
|
|
|
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
|
|
|
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
|
|
|
// Third party copyrights are property of their respective owners.
|
|
|
|
//
|
|
|
|
// Redistribution and use in source and binary forms, with or without modification,
|
|
|
|
// are permitted provided that the following conditions are met:
|
|
|
|
//
|
|
|
|
// * Redistribution's of source code must retain the above copyright notice,
|
|
|
|
// this list of conditions and the following disclaimer.
|
|
|
|
//
|
|
|
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
|
|
|
// this list of conditions and the following disclaimer in the documentation
|
|
|
|
// and/or other materials provided with the distribution.
|
|
|
|
//
|
|
|
|
// * The name of the copyright holders may not be used to endorse or promote products
|
|
|
|
// derived from this software without specific prior written permission.
|
|
|
|
//
|
|
|
|
// This software is provided by the copyright holders and contributors "as is" and
|
|
|
|
// any express or implied warranties, including, but not limited to, the implied
|
|
|
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
|
|
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
|
|
|
// indirect, incidental, special, exemplary, or consequential damages
|
|
|
|
// (including, but not limited to, procurement of substitute goods or services;
|
|
|
|
// loss of use, data, or profits; or business interruption) however caused
|
|
|
|
// and on any theory of liability, whether in contract, strict liability,
|
|
|
|
// or tort (including negligence or otherwise) arising in any way out of
|
|
|
|
// the use of this software, even if advised of the possibility of such damage.
|
|
|
|
//
|
|
|
|
//M*/
|
|
|
|
|
|
|
|
#include "precomp.hpp"
|
|
|
|
|
2017-11-24 22:34:02 +08:00
|
|
|
#include "opencv2/core/opencl/ocl_defs.hpp"
|
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
using namespace cv;
|
|
|
|
using namespace cv::detail;
|
2013-08-28 19:45:13 +08:00
|
|
|
using namespace cv::cuda;
|
2012-10-17 15:12:04 +08:00
|
|
|
|
2014-08-22 21:33:24 +08:00
|
|
|
#ifdef HAVE_OPENCV_XFEATURES2D
|
|
|
|
#include "opencv2/xfeatures2d.hpp"
|
2014-10-17 18:22:02 +08:00
|
|
|
using xfeatures2d::SURF;
|
2018-08-02 15:58:49 +08:00
|
|
|
using xfeatures2d::SIFT;
|
2018-10-11 03:26:19 +08:00
|
|
|
#else
|
|
|
|
# if defined(_MSC_VER)
|
|
|
|
# pragma warning(disable:4702) // unreachable code
|
|
|
|
# endif
|
2014-10-17 20:09:09 +08:00
|
|
|
#endif
|
2014-10-17 18:22:02 +08:00
|
|
|
|
2017-08-18 00:32:53 +08:00
|
|
|
#ifdef HAVE_OPENCV_CUDAIMGPROC
|
|
|
|
# include "opencv2/cudaimgproc.hpp"
|
|
|
|
#endif
|
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Pair of (distance, image index). Ordered by the distance component only,
// so sorting a collection of these yields closest candidates first.
struct DistIdxPair
{
    // Strict weak ordering on 'dist'; 'idx' does not participate.
    bool operator<(const DistIdxPair &other) const
    {
        return dist < other.dist;
    }

    double dist;
    int idx;
};
|
|
|
|
|
|
|
|
|
2013-05-30 22:44:33 +08:00
|
|
|
struct MatchPairsBody : ParallelLoopBody
|
2012-10-17 15:12:04 +08:00
|
|
|
{
|
2013-02-25 00:14:01 +08:00
|
|
|
MatchPairsBody(FeaturesMatcher &_matcher, const std::vector<ImageFeatures> &_features,
|
|
|
|
std::vector<MatchesInfo> &_pairwise_matches, std::vector<std::pair<int,int> > &_near_pairs)
|
2012-10-17 15:12:04 +08:00
|
|
|
: matcher(_matcher), features(_features),
|
|
|
|
pairwise_matches(_pairwise_matches), near_pairs(_near_pairs) {}
|
|
|
|
|
2018-03-15 21:16:59 +08:00
|
|
|
void operator ()(const Range &r) const CV_OVERRIDE
|
2012-10-17 15:12:04 +08:00
|
|
|
{
|
2017-02-28 23:50:09 +08:00
|
|
|
cv::RNG rng = cv::theRNG(); // save entry rng state
|
2012-10-17 15:12:04 +08:00
|
|
|
const int num_images = static_cast<int>(features.size());
|
2013-05-30 22:44:33 +08:00
|
|
|
for (int i = r.start; i < r.end; ++i)
|
2012-10-17 15:12:04 +08:00
|
|
|
{
|
2017-02-28 23:50:09 +08:00
|
|
|
cv::theRNG() = cv::RNG(rng.state + i); // force "stable" RNG seed for each processed pair
|
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
int from = near_pairs[i].first;
|
|
|
|
int to = near_pairs[i].second;
|
|
|
|
int pair_idx = from*num_images + to;
|
|
|
|
|
|
|
|
matcher(features[from], features[to], pairwise_matches[pair_idx]);
|
|
|
|
pairwise_matches[pair_idx].src_img_idx = from;
|
|
|
|
pairwise_matches[pair_idx].dst_img_idx = to;
|
|
|
|
|
|
|
|
size_t dual_pair_idx = to*num_images + from;
|
|
|
|
|
|
|
|
pairwise_matches[dual_pair_idx] = pairwise_matches[pair_idx];
|
|
|
|
pairwise_matches[dual_pair_idx].src_img_idx = to;
|
|
|
|
pairwise_matches[dual_pair_idx].dst_img_idx = from;
|
|
|
|
|
|
|
|
if (!pairwise_matches[pair_idx].H.empty())
|
|
|
|
pairwise_matches[dual_pair_idx].H = pairwise_matches[pair_idx].H.inv();
|
|
|
|
|
|
|
|
for (size_t j = 0; j < pairwise_matches[dual_pair_idx].matches.size(); ++j)
|
|
|
|
std::swap(pairwise_matches[dual_pair_idx].matches[j].queryIdx,
|
|
|
|
pairwise_matches[dual_pair_idx].matches[j].trainIdx);
|
|
|
|
LOG(".");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
FeaturesMatcher &matcher;
|
2013-02-25 00:14:01 +08:00
|
|
|
const std::vector<ImageFeatures> &features;
|
|
|
|
std::vector<MatchesInfo> &pairwise_matches;
|
|
|
|
std::vector<std::pair<int,int> > &near_pairs;
|
2012-10-17 15:12:04 +08:00
|
|
|
|
|
|
|
private:
|
|
|
|
void operator =(const MatchPairsBody&);
|
|
|
|
};
|
|
|
|
|
|
|
|
|
Merge pull request #6933 from hrnr:gsoc_all
[GSOC] New camera model for stitching pipeline
* implement estimateAffine2D
estimates affine transformation using robust RANSAC method.
* uses RANSAC framework in calib3d
* includes accuracy test
* uses SVD decomposition for solving 3 point equation
* implement estimateAffinePartial2D
estimates limited affine transformation
* includes accuracy test
* stitching: add affine matcher
initial version of matcher that estimates affine transformation
* stitching: added affine transform estimator
initial version of estimator that simply chain transformations in homogeneous coordinates
* calib3d: rename estimateAffine3D test
test Calib3d_EstimateAffineTransform rename to Calib3d_EstimateAffine3D. This is more descriptive and prevents confusion with estimateAffine2D tests.
* added perf test for estimateAffine functions
tests both estimateAffine2D and estimateAffinePartial2D
* calib3d: compare error in square in estimateAffine2D
* incorporates fix from #6768
* rerun affine estimation on inliers
* stitching: new API for parallel feature finding
due to ABI breakage new functionality is added to `FeaturesFinder2`, `SurfFeaturesFinder2` and `OrbFeaturesFinder2`
* stitching: add tests for parallel feature find API
* perf test (about linear speed up)
* accuracy test compares results with serial version
* stitching: use dynamic_cast to overcome ABI issues
adding parallel API to FeaturesFinder breaks ABI. This commit uses dynamic_cast and hardcodes thread-safe finders to avoid breaking ABI.
This should be replaced by proper method similar to FeaturesMatcher on next ABI break.
* use estimateAffinePartial2D in AffineBestOf2NearestMatcher
* add constructor to AffineBestOf2NearestMatcher
* allows to choose between full affine transform and partial affine transform. Other params are the as for BestOf2NearestMatcher
* added protected field
* samples: stitching_detailed support affine estimator and matcher
* added new flags to choose matcher and estimator
* stitching: rework affine matcher
represent transformation in homogeneous coordinates
affine matcher: remove duplicate code
rework flow to get rid of duplicate code
affine matcher: do not center points to (0, 0)
it is not needed for affine model. it should not affect estimation in any way.
affine matcher: remove unneeded cv namespacing
* stitching: add stub bundle adjuster
* adds stub bundle adjuster that does nothing
* can be used in place of standard bundle adjusters to omit bundle adjusting step
* samples: stitching detailed, support no bundle adjust
* uses new NoBundleAdjuster
* added affine warper
* uses R to get whole affine transformation and propagates rotation and translation to plane warper
* add affine warper factory class
* affine warper: compensate transformation
* samples: stitching_detailed add support for affine warper
* add Stitcher::create method
this method follows similar constructor methods and returns smart pointer. This allows constructing Stitcher according to OpenCV guidelines.
* supports multiple stitcher configurations (PANORAMA and SCANS) for convenient setup
* returns cv::Ptr
* stitcher: dynamicaly determine correct estimator
we need to use affine estimator for affine matcher
* preserves ABI (but add hints for ABI 4)
* uses dynamic_cast hack to inject correct estimator
* sample stitching: add support for multiple modes
shows how to use different configurations of stitcher easily (panorama stitching and scans affine model)
* stitcher: find features in parallel
use new FeatureFinder API to find features in parallel. Parallelized using TBB.
* stitching: disable parallel feature finding for OCL
it does not bring much speedup to run features finder in parallel when OpenCL is enabled, because finder needs to wait for OCL device.
Also, currently ORB is not thread-safe when OCL is enabled.
* stitching: move matcher tests
move matchers tests perf_stich.cpp -> perf_matchers.cpp
* stitching: add affine stiching integration test
test basic affine stitching (SCANS mode of stitcher) with images that have only translation between them
* enable surf for stitching tests
stitching.b12 test was failing with surf
investigated the issue, surf is producing good result. Transformation is only slightly different from ORB, so that resulting pano does not exactly match ORB's result. That caused sanity check to fail.
* added size checks similar to other tests
* sanity check will be applied only for ORB
* stitching: fix wrong estimator choice
if case was exactly wrong, estimators were chosen wrong
added logging for estimated transformation
* enable surf for matchers stitching tests
* enable SURF
* rework sanity checking. Check estimated transform instead of matches. Est. transform should be more stable and comparable between SURF and ORB.
* remove regression checking for VectorFeatures tests. It has a lot if data andtest is the same as previous except it test different vector size for performance, so sanity checking does not add any value here. Added basic sanity asserts instead.
* stitching tests: allow relative error for transform
* allows .01 relative error for estimated homography sanity check in stitching matchers tests
* fix VS warning
stitching tests: increase relative error
increase relative error to make it pass on all platforms (results are still good).
stitching test: allow bigger relative error
transformation can differ in small values (with small absolute difference, but large relative difference). transformation output still looks usable for all platforms. This difference affects only mac and windows, linux passes fine with small difference.
* stitching: add tests for affine matcher
uses s1, s2 images. added also new sanity data.
* stitching tests: use different data for matchers tests
this data should yield a more stable transformation (it has many more matches, especially for SURF). Sanity data regenerated.
* stitching test: rework tests for matchers
* separated rotation and translations as they are different by scale.
* use appropriate absolute error for them separately. (relative error does not work for values near zero.)
* stitching: fix affine warper compensation
calculation of rotation and translation extracted for plane warper was wrong
* stitching test: enable surf for opencl integration tests
* enable SURF with correct guard (HAVE_OPENCV_XFEATURES2D)
* add OPENCL guard and correct namespace as usual for opencl tests
* stitching: add ocl accuracy test for affine warper
test consistent results with ocl on and off
* stitching: add affine warper ocl perf test
add affine warper to existing warper perf tests. Added new sanity data.
* stitching: do not overwrite inliers in affine matcher
* estimation is run second time on inliers only, inliers produces in second run will not be therefore correct for all matches
* calib3d: add Levenberg–Marquardt refining to estimateAffine2D* functions
this adds affine Levenberg–Marquardt refining to estimateAffine2D functions similar to what is done in findHomography.
implements Levenberg–Marquardt refinig for both full affine and partial affine transformations.
* stitching: remove reestimation step in affine matcher
reestimation step is not needed. estimateAffine2D* functions are running their own reestimation on inliers using the Levenberg-Marquardt algorithm, which is better than simply rerunning RANSAC on inliers.
* implement partial affine bundle adjuster
bundle adjuster that expect affine transform with 4DOF. Refines parameters for all cameras together.
stitching: fix bug in BundleAdjusterAffinePartial
* use the invers properly
* use static buffer for invers to speed it up
* samples: add affine bundle adjuster option to stitching_detailed
* add support for using affine bundle adjuster with 4DOF
* improve logging of initial intrinsics
* stitching: add affine bundle adjuster test
* fix build warnings
* stitching: increase limit on sanity check
prevents spurious test failures on mac. values are still pretty fine.
* stitching: set affine bundle adjuster for SCANS mode
* fix bug with AffineBestOf2NearestMatcher (we want to select affine partial mode)
* select right bundle adjuster
* stitching: increase error bound for matcher tests
* this prevents failure on mac. tranformation is still ok.
* stitching: implement affine bundle adjuster
* implements affine bundle adjuster that is using full affine transform
* existing test case modified to test both affinePartial an full affine bundle adjuster
* add stitching tutorial
* show basic usage of stitching api (Stitcher class)
* stitching: add more integration test for affine stitching
* added new datasets to existing testcase
* removed unused include
* calib3d: move `haveCollinearPoints` to common header
* added comment to make that this also checks too close points
* calib3d: redone checkSubset for estimateAffine* callback
* use common function to check collinearity
* this also ensures that point will not be too close to each other
* calib3d: change estimateAffine* functions API
* more similar to `findHomography`, `findFundamentalMat`, `findEssentialMat` and similar
* follows standard recommended semantic INPUTS, OUTPUTS, FLAGS
* allows to disable refining
* supported LMEDS robust method (tests yet to come) along with RANSAC
* extended docs with some tips
* calib3d: rewrite estimateAffine2D test
* rewrite in googletest style
* parametrize to test both robust methods (RANSAC and LMEDS)
* get rid of boilerplate
* calib3d: rework estimateAffinePartial2D test
* rework in googletest style
* add testing for LMEDS
* calib3d: rework estimateAffine*2D perf test
* test for LMEDS speed
* test with/without Levenberg-Marquart
* remove sanity checking (this is covered by accuracy tests)
* calib3d: improve estimateAffine*2D tests
* test transformations in loop
* improves test by testing more potential transformations
* calib3d: rewrite kernels for estimateAffine*2D functions
* use analytical solution instead of SVD
* this version is faster especially for smaller amount of points
* calib3d: tune up perf of estimateAffine*2D functions
* avoid copying inliers
* avoid converting input points if not necessary
* check only `from` point for collinearity, as `to` does not affect stability of transform
* tutorials: add commands examples to stitching tutorials
* add some examples how to run stitcher sample code
* mention stitching_detailed.cpp
* calib3d: change computeError for estimateAffine*2D
* do error computing in floats instead of doubles
this have required precision + we were storing the result in float anyway. This make code faster and allows auto-vectorization by smart compilers.
* documentation: mention estimateAffine*2D function
* refer to new functions on appropriate places
* prefer estimateAffine*2D over estimateRigidTransform
* stitching: add camera models documentations
* mention camera models in module documentation to give user a better overview and reduce confusion
2016-10-23 00:10:42 +08:00
|
|
|
struct FindFeaturesBody : ParallelLoopBody
|
|
|
|
{
|
|
|
|
FindFeaturesBody(FeaturesFinder &finder, InputArrayOfArrays images,
|
|
|
|
std::vector<ImageFeatures> &features, const std::vector<std::vector<cv::Rect> > *rois)
|
|
|
|
: finder_(finder), images_(images), features_(features), rois_(rois) {}
|
|
|
|
|
2018-03-15 21:16:59 +08:00
|
|
|
void operator ()(const Range &r) const CV_OVERRIDE
|
Merge pull request #6933 from hrnr:gsoc_all
[GSOC] New camera model for stitching pipeline
* implement estimateAffine2D
estimates affine transformation using robust RANSAC method.
* uses RANSAC framework in calib3d
* includes accuracy test
* uses SVD decomposition for solving 3 point equation
* implement estimateAffinePartial2D
estimates limited affine transformation
* includes accuracy test
* stitching: add affine matcher
initial version of matcher that estimates affine transformation
* stitching: added affine transform estimator
initial version of estimator that simply chain transformations in homogeneous coordinates
* calib3d: rename estimateAffine3D test
test Calib3d_EstimateAffineTransform rename to Calib3d_EstimateAffine3D. This is more descriptive and prevents confusion with estimateAffine2D tests.
* added perf test for estimateAffine functions
tests both estimateAffine2D and estimateAffinePartial2D
* calib3d: compare error in square in estimateAffine2D
* incorporates fix from #6768
* rerun affine estimation on inliers
* stitching: new API for parallel feature finding
due to ABI breakage new functionality is added to `FeaturesFinder2`, `SurfFeaturesFinder2` and `OrbFeaturesFinder2`
* stitching: add tests for parallel feature find API
* perf test (about linear speed up)
* accuracy test compares results with serial version
* stitching: use dynamic_cast to overcome ABI issues
adding parallel API to FeaturesFinder breaks ABI. This commit uses dynamic_cast and hardcodes thread-safe finders to avoid breaking ABI.
This should be replaced by proper method similar to FeaturesMatcher on next ABI break.
* use estimateAffinePartial2D in AffineBestOf2NearestMatcher
* add constructor to AffineBestOf2NearestMatcher
* allows to choose between full affine transform and partial affine transform. Other params are the as for BestOf2NearestMatcher
* added protected field
* samples: stitching_detailed support affine estimator and matcher
* added new flags to choose matcher and estimator
* stitching: rework affine matcher
represent transformation in homogeneous coordinates
affine matcher: remove duplicite code
rework flow to get rid of duplicite code
affine matcher: do not center points to (0, 0)
it is not needed for affine model. it should not affect estimation in any way.
affine matcher: remove unneeded cv namespacing
* stitching: add stub bundle adjuster
* adds stub bundle adjuster that does nothing
* can be used in place of standard bundle adjusters to omit bundle adjusting step
* samples: stitching detailed, support no budle adjust
* uses new NoBundleAdjuster
* added affine warper
* uses R to get whole affine transformation and propagates rotation and translation to plane warper
* add affine warper factory class
* affine warper: compensate transformation
* samples: stitching_detailed add support for affine warper
* add Stitcher::create method
this method follows similar constructor methods and returns smart pointer. This allows constructing Stitcher according to OpenCV guidelines.
* supports multiple stitcher configurations (PANORAMA and SCANS) for convenient setup
* returns cv::Ptr
* stitcher: dynamicaly determine correct estimator
we need to use affine estimator for affine matcher
* preserves ABI (but add hints for ABI 4)
* uses dynamic_cast hack to inject correct estimator
* sample stitching: add support for multiple modes
shows how to use different configurations of stitcher easily (panorama stitching and scans affine model)
* stitcher: find features in parallel
use new FeatureFinder API to find features in parallel. Parallelized using TBB.
* stitching: disable parallel feature finding for OCL
it does not bring much speedup to run features finder in parallel when OpenCL is enabled, because finder needs to wait for OCL device.
Also, currently ORB is not thread-safe when OCL is enabled.
* stitching: move matcher tests
move matchers tests perf_stich.cpp -> perf_matchers.cpp
* stitching: add affine stiching integration test
test basic affine stitching (SCANS mode of stitcher) with images that have only translation between them
* enable surf for stitching tests
stitching.b12 test was failing with surf
investigated the issue, surf is producing good result. Transformation is only slightly different from ORB, so that resulting pano does not exactly match ORB's result. That caused sanity check to fail.
* added size checks similar to other tests
* sanity check will be applied only for ORB
* stitching: fix wrong estimator choice
if case was exactly wrong, estimators were chosen wrong
added logging for estimated transformation
* enable surf for matchers stitching tests
* enable SURF
* rework sanity checking. Check estimated transform instead of matches. Est. transform should be more stable and comparable between SURF and ORB.
* remove regression checking for VectorFeatures tests. It has a lot if data andtest is the same as previous except it test different vector size for performance, so sanity checking does not add any value here. Added basic sanity asserts instead.
* stitching tests: allow relative error for transform
* allows .01 relative error for estimated homography sanity check in stitching matchers tests
* fix VS warning
stitching tests: increase relative error
increase relative error to make it pass on all platforms (results are still good).
stitching test: allow bigger relative error
transformation can differ in small values (with small absolute difference, but large relative difference). transformation output still looks usable for all platforms. This difference affects only mac and windows, linux passes fine with small difference.
* stitching: add tests for affine matcher
uses s1, s2 images. added also new sanity data.
* stitching tests: use different data for matchers tests
this data should yeild more stable transformation (it has much more matches, especially for surf). Sanity data regenerated.
* stitching test: rework tests for matchers
* separated rotation and translations as they are different by scale.
* use appropriate absolute error for them separately. (relative error does not work for values near zero.)
* stitching: fix affine warper compensation
calculation of rotation and translation extracted for plane warper was wrong
* stitching test: enable surf for opencl integration tests
* enable SURF with correct guard (HAVE_OPENCV_XFEATURES2D)
* add OPENCL guard and correct namespace as usual for opencl tests
* stitching: add ocl accuracy test for affine warper
test consistent results with ocl on and off
* stitching: add affine warper ocl perf test
add affine warper to existing warper perf tests. Added new sanity data.
* stitching: do not overwrite inliers in affine matcher
* estimation is run second time on inliers only, inliers produces in second run will not be therefore correct for all matches
* calib3d: add Levenberg–Marquardt refining to estimateAffine2D* functions
this adds affine Levenberg–Marquardt refining to estimateAffine2D functions similar to what is done in findHomography.
implements Levenberg–Marquardt refinig for both full affine and partial affine transformations.
* stitching: remove reestimation step in affine matcher
reestimation step is not needed. estimateAffine2D* functions are running their own reestimation on inliers using the Levenberg-Marquardt algorithm, which is better than simply rerunning RANSAC on inliers.
* implement partial affine bundle adjuster
bundle adjuster that expect affine transform with 4DOF. Refines parameters for all cameras together.
stitching: fix bug in BundleAdjusterAffinePartial
* use the invers properly
* use static buffer for invers to speed it up
* samples: add affine bundle adjuster option to stitching_detailed
* add support for using affine bundle adjuster with 4DOF
* improve logging of initial intristics
* sttiching: add affine bundle adjuster test
* fix build warnings
* stitching: increase limit on sanity check
prevents spurious test failures on mac. values are still pretty fine.
* stitching: set affine bundle adjuster for SCANS mode
* fix bug with AffineBestOf2NearestMatcher (we want to select affine partial mode)
* select right bundle adjuster
* stitching: increase error bound for matcher tests
* this prevents failure on mac. tranformation is still ok.
* stitching: implement affine bundle adjuster
* implements affine bundle adjuster that is using full affine transform
* existing test case modified to test both affinePartial an full affine bundle adjuster
* add stitching tutorial
* show basic usage of stitching api (Stitcher class)
* stitching: add more integration test for affine stitching
* added new datasets to existing testcase
* removed unused include
* calib3d: move `haveCollinearPoints` to common header
* added comment to make that this also checks too close points
* calib3d: redone checkSubset for estimateAffine* callback
* use common function to check collinearity
* this also ensures that point will not be too close to each other
* calib3d: change estimateAffine* functions API
* more similar to `findHomography`, `findFundamentalMat`, `findEssentialMat` and similar
* follows standard recommended semantic INPUTS, OUTPUTS, FLAGS
* allows to disable refining
* supported LMEDS robust method (tests yet to come) along with RANSAC
* extended docs with some tips
* calib3d: rewrite estimateAffine2D test
* rewrite in googletest style
* parametrize to test both robust methods (RANSAC and LMEDS)
* get rid of boilerplate
* calib3d: rework estimateAffinePartial2D test
* rework in googletest style
* add testing for LMEDS
* calib3d: rework estimateAffine*2D perf test
* test for LMEDS speed
* test with/without Levenberg-Marquart
* remove sanity checking (this is covered by accuracy tests)
* calib3d: improve estimateAffine*2D tests
* test transformations in loop
* improves test by testing more potential transformations
* calib3d: rewrite kernels for estimateAffine*2D functions
* use analytical solution instead of SVD
* this version is faster especially for smaller amount of points
* calib3d: tune up perf of estimateAffine*2D functions
* avoid copying inliers
* avoid converting input points if not necessary
* check only `from` point for collinearity, as `to` does not affect stability of transform
* tutorials: add commands examples to stitching tutorials
* add some examples how to run stitcher sample code
* mention stitching_detailed.cpp
* calib3d: change computeError for estimateAffine*2D
* do error computing in floats instead of doubles
this have required precision + we were storing the result in float anyway. This make code faster and allows auto-vectorization by smart compilers.
* documentation: mention estimateAffine*2D function
* refer to new functions on appropriate places
* prefer estimateAffine*2D over estimateRigidTransform
* stitching: add camera models documentations
* mention camera models in module documentation to give user a better overview and reduce confusion
2016-10-23 00:10:42 +08:00
|
|
|
{
|
|
|
|
for (int i = r.start; i < r.end; ++i)
|
|
|
|
{
|
|
|
|
Mat image = images_.getMat(i);
|
|
|
|
if (rois_)
|
|
|
|
finder_(image, features_[i], (*rois_)[i]);
|
|
|
|
else
|
|
|
|
finder_(image, features_[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
FeaturesFinder &finder_;
|
|
|
|
InputArrayOfArrays images_;
|
|
|
|
std::vector<ImageFeatures> &features_;
|
|
|
|
const std::vector<std::vector<cv::Rect> > *rois_;
|
|
|
|
|
|
|
|
// to cease visual studio warning
|
|
|
|
void operator =(const FindFeaturesBody&);
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
2013-02-25 00:14:01 +08:00
|
|
|
typedef std::set<std::pair<int,int> > MatchesSet;
|
2012-10-17 15:12:04 +08:00
|
|
|
|
|
|
|
// These two classes are aimed to find features matches only, not to
|
|
|
|
// estimate homography
|
|
|
|
|
2018-03-15 21:16:59 +08:00
|
|
|
// CPU implementation of two-nearest-neighbour descriptor matching.
// Finds feature correspondences only; homography estimation happens elsewhere.
class CpuMatcher CV_FINAL : public FeaturesMatcher
{
public:
    // match_conf: ratio-test confidence — a match is accepted when
    // best_dist < (1 - match_conf) * second_best_dist.
    CpuMatcher(float match_conf) : FeaturesMatcher(true), match_conf_(match_conf) {}

    void match(const ImageFeatures &features1, const ImageFeatures &features2,
               MatchesInfo& matches_info) CV_OVERRIDE;

private:
    float match_conf_;
};
|
|
|
|
|
2013-07-24 14:27:59 +08:00
|
|
|
#ifdef HAVE_OPENCV_CUDAFEATURES2D
// CUDA counterpart of CpuMatcher: matches descriptors on the GPU and keeps
// device-side buffers between calls so repeated matching avoids reallocation.
class GpuMatcher CV_FINAL : public FeaturesMatcher
{
public:
    // match_conf: ratio-test confidence, same meaning as in CpuMatcher.
    GpuMatcher(float match_conf) : match_conf_(match_conf) {}

    // NOTE(review): CV_OVERRIDE added for consistency with CpuMatcher::match;
    // it also lets the compiler verify these really override base virtuals.
    void match(const ImageFeatures &features1, const ImageFeatures &features2,
               MatchesInfo& matches_info) CV_OVERRIDE;

    // Releases the cached GPU buffers.
    void collectGarbage() CV_OVERRIDE;

private:
    float match_conf_;                          // ratio-test confidence threshold
    GpuMat descriptors1_, descriptors2_;        // device copies of the descriptors
    GpuMat train_idx_, distance_, all_dist_;    // scratch buffers reused across calls
    std::vector< std::vector<DMatch> > pair_matches;
};
#endif
|
|
|
|
|
|
|
|
|
|
|
|
void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
|
|
|
|
{
|
2018-09-14 05:35:26 +08:00
|
|
|
CV_INSTRUMENT_REGION();
|
2016-08-18 14:53:00 +08:00
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
CV_Assert(features1.descriptors.type() == features2.descriptors.type());
|
|
|
|
CV_Assert(features2.descriptors.depth() == CV_8U || features2.descriptors.depth() == CV_32F);
|
|
|
|
|
|
|
|
#ifdef HAVE_TEGRA_OPTIMIZATION
|
2015-02-27 17:52:11 +08:00
|
|
|
if (tegra::useTegra() && tegra::match2nearest(features1, features2, matches_info, match_conf_))
|
2012-10-17 15:12:04 +08:00
|
|
|
return;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
matches_info.matches.clear();
|
|
|
|
|
2015-01-13 22:57:30 +08:00
|
|
|
Ptr<cv::DescriptorMatcher> matcher;
|
2014-02-21 21:58:33 +08:00
|
|
|
#if 0 // TODO check this
|
2017-11-24 22:34:02 +08:00
|
|
|
if (ocl::isOpenCLActivated())
|
2012-10-17 15:12:04 +08:00
|
|
|
{
|
2014-02-21 21:58:33 +08:00
|
|
|
matcher = makePtr<BFMatcher>((int)NORM_L2);
|
2012-10-17 15:12:04 +08:00
|
|
|
}
|
2014-02-21 21:58:33 +08:00
|
|
|
else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>();
|
|
|
|
Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>();
|
2012-10-17 15:12:04 +08:00
|
|
|
|
2014-02-21 21:58:33 +08:00
|
|
|
if (features2.descriptors.depth() == CV_8U)
|
|
|
|
{
|
|
|
|
indexParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
|
|
|
|
searchParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
|
|
|
|
}
|
|
|
|
|
|
|
|
matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
|
|
|
|
}
|
2013-02-25 00:14:01 +08:00
|
|
|
std::vector< std::vector<DMatch> > pair_matches;
|
2012-10-17 15:12:04 +08:00
|
|
|
MatchesSet matches;
|
|
|
|
|
|
|
|
// Find 1->2 matches
|
2014-02-21 21:58:33 +08:00
|
|
|
matcher->knnMatch(features1.descriptors, features2.descriptors, pair_matches, 2);
|
2012-10-17 15:12:04 +08:00
|
|
|
for (size_t i = 0; i < pair_matches.size(); ++i)
|
|
|
|
{
|
|
|
|
if (pair_matches[i].size() < 2)
|
|
|
|
continue;
|
|
|
|
const DMatch& m0 = pair_matches[i][0];
|
|
|
|
const DMatch& m1 = pair_matches[i][1];
|
|
|
|
if (m0.distance < (1.f - match_conf_) * m1.distance)
|
|
|
|
{
|
|
|
|
matches_info.matches.push_back(m0);
|
2013-02-25 00:14:01 +08:00
|
|
|
matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
|
2012-10-17 15:12:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
LOG("\n1->2 matches: " << matches_info.matches.size() << endl);
|
|
|
|
|
|
|
|
// Find 2->1 matches
|
|
|
|
pair_matches.clear();
|
2014-02-21 21:58:33 +08:00
|
|
|
matcher->knnMatch(features2.descriptors, features1.descriptors, pair_matches, 2);
|
2012-10-17 15:12:04 +08:00
|
|
|
for (size_t i = 0; i < pair_matches.size(); ++i)
|
|
|
|
{
|
|
|
|
if (pair_matches[i].size() < 2)
|
|
|
|
continue;
|
|
|
|
const DMatch& m0 = pair_matches[i][0];
|
|
|
|
const DMatch& m1 = pair_matches[i][1];
|
|
|
|
if (m0.distance < (1.f - match_conf_) * m1.distance)
|
2013-02-25 00:14:01 +08:00
|
|
|
if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
|
2012-10-17 15:12:04 +08:00
|
|
|
matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
|
|
|
|
}
|
|
|
|
LOG("1->2 & 2->1 matches: " << matches_info.matches.size() << endl);
|
|
|
|
}
|
|
|
|
|
2013-07-24 14:27:59 +08:00
|
|
|
#ifdef HAVE_OPENCV_CUDAFEATURES2D
|
2012-10-17 15:12:04 +08:00
|
|
|
void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
|
|
|
|
{
|
2018-09-14 05:35:26 +08:00
|
|
|
CV_INSTRUMENT_REGION();
|
2016-08-18 14:53:00 +08:00
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
matches_info.matches.clear();
|
|
|
|
|
|
|
|
ensureSizeIsEnough(features1.descriptors.size(), features1.descriptors.type(), descriptors1_);
|
|
|
|
ensureSizeIsEnough(features2.descriptors.size(), features2.descriptors.type(), descriptors2_);
|
|
|
|
|
|
|
|
descriptors1_.upload(features1.descriptors);
|
|
|
|
descriptors2_.upload(features2.descriptors);
|
|
|
|
|
2014-07-15 17:26:32 +08:00
|
|
|
//TODO: NORM_L1 allows to avoid matcher crashes for ORB features, but is not absolutely correct for them.
|
|
|
|
// The best choice for ORB features is NORM_HAMMING, but it is incorrect for SURF features.
|
|
|
|
// More accurate fix in this place should be done in the future -- the type of the norm
|
|
|
|
// should be either a parameter of this method, or a field of the class.
|
2015-12-08 15:24:54 +08:00
|
|
|
Ptr<cuda::DescriptorMatcher> matcher = cuda::DescriptorMatcher::createBFMatcher(NORM_L1);
|
2015-01-13 22:57:30 +08:00
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
MatchesSet matches;
|
|
|
|
|
|
|
|
// Find 1->2 matches
|
|
|
|
pair_matches.clear();
|
2015-01-13 22:57:30 +08:00
|
|
|
matcher->knnMatch(descriptors1_, descriptors2_, pair_matches, 2);
|
2012-10-17 15:12:04 +08:00
|
|
|
for (size_t i = 0; i < pair_matches.size(); ++i)
|
|
|
|
{
|
|
|
|
if (pair_matches[i].size() < 2)
|
|
|
|
continue;
|
|
|
|
const DMatch& m0 = pair_matches[i][0];
|
|
|
|
const DMatch& m1 = pair_matches[i][1];
|
|
|
|
if (m0.distance < (1.f - match_conf_) * m1.distance)
|
|
|
|
{
|
|
|
|
matches_info.matches.push_back(m0);
|
2013-02-25 00:14:01 +08:00
|
|
|
matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
|
2012-10-17 15:12:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find 2->1 matches
|
|
|
|
pair_matches.clear();
|
2015-01-13 22:57:30 +08:00
|
|
|
matcher->knnMatch(descriptors2_, descriptors1_, pair_matches, 2);
|
2012-10-17 15:12:04 +08:00
|
|
|
for (size_t i = 0; i < pair_matches.size(); ++i)
|
|
|
|
{
|
|
|
|
if (pair_matches[i].size() < 2)
|
|
|
|
continue;
|
|
|
|
const DMatch& m0 = pair_matches[i][0];
|
|
|
|
const DMatch& m1 = pair_matches[i][1];
|
|
|
|
if (m0.distance < (1.f - match_conf_) * m1.distance)
|
2013-02-25 00:14:01 +08:00
|
|
|
if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
|
2012-10-17 15:12:04 +08:00
|
|
|
matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void GpuMatcher::collectGarbage()
|
|
|
|
{
|
|
|
|
descriptors1_.release();
|
|
|
|
descriptors2_.release();
|
|
|
|
train_idx_.release();
|
|
|
|
distance_.release();
|
|
|
|
all_dist_.release();
|
2013-02-25 00:14:01 +08:00
|
|
|
std::vector< std::vector<DMatch> >().swap(pair_matches);
|
2012-10-17 15:12:04 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
|
|
|
|
namespace cv {
|
|
|
|
namespace detail {
|
|
|
|
|
2014-02-14 19:36:04 +08:00
|
|
|
void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features)
{
    // Delegate detection/description to the concrete finder, then record
    // the dimensions of the source image alongside the result.
    find(image, features);
    const Size image_size = image.size();
    features.img_size = image_size;
}
|
|
|
|
|
|
|
|
|
2014-02-14 19:36:04 +08:00
|
|
|
void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features, const std::vector<Rect> &rois)
{
    // Finds features independently inside each ROI, then merges the results:
    // keypoint coordinates are shifted back into whole-image space and the
    // per-ROI descriptor matrices are stacked vertically.

    features.img_size = image.size();

    // Guard the degenerate case: the original code read roi_features[0]
    // unconditionally, which is undefined behaviour when rois is empty.
    if (rois.empty())
    {
        features.keypoints.clear();
        features.descriptors.release();
        return;
    }

    std::vector<ImageFeatures> roi_features(rois.size());
    size_t total_kps_count = 0;
    int total_descriptors_height = 0;

    // Hoisted out of the loop: getUMat() need only be evaluated once.
    UMat whole = image.getUMat();

    for (size_t i = 0; i < rois.size(); ++i)
    {
        find(whole(rois[i]), roi_features[i]);
        total_kps_count += roi_features[i].keypoints.size();
        total_descriptors_height += roi_features[i].descriptors.rows;
    }

    features.keypoints.resize(total_kps_count);
    features.descriptors.create(total_descriptors_height,
                                roi_features[0].descriptors.cols,
                                roi_features[0].descriptors.type());

    int kp_idx = 0;
    int descr_offset = 0;
    for (size_t i = 0; i < rois.size(); ++i)
    {
        // Translate ROI-local keypoints into whole-image coordinates.
        for (size_t j = 0; j < roi_features[i].keypoints.size(); ++j, ++kp_idx)
        {
            features.keypoints[kp_idx] = roi_features[i].keypoints[j];
            features.keypoints[kp_idx].pt.x += (float)rois[i].x;
            features.keypoints[kp_idx].pt.y += (float)rois[i].y;
        }
        // Copy this ROI's descriptors into its slice of the stacked matrix.
        UMat subdescr = features.descriptors.rowRange(
            descr_offset, descr_offset + roi_features[i].descriptors.rows);
        roi_features[i].descriptors.copyTo(subdescr);
        descr_offset += roi_features[i].descriptors.rows;
    }
}
|
|
|
|
|
|
|
|
|
Merge pull request #6933 from hrnr:gsoc_all
[GSOC] New camera model for stitching pipeline
* implement estimateAffine2D
estimates affine transformation using robust RANSAC method.
* uses RANSAC framework in calib3d
* includes accuracy test
* uses SVD decomposition for solving 3 point equation
* implement estimateAffinePartial2D
estimates limited affine transformation
* includes accuracy test
* stitching: add affine matcher
initial version of matcher that estimates affine transformation
* stitching: added affine transform estimator
initial version of estimator that simply chain transformations in homogeneous coordinates
* calib3d: rename estimateAffine3D test
test Calib3d_EstimateAffineTransform rename to Calib3d_EstimateAffine3D. This is more descriptive and prevents confusion with estimateAffine2D tests.
* added perf test for estimateAffine functions
tests both estimateAffine2D and estimateAffinePartial2D
* calib3d: compare error in square in estimateAffine2D
* incorporates fix from #6768
* rerun affine estimation on inliers
* stitching: new API for parallel feature finding
due to ABI breakage new functionality is added to `FeaturesFinder2`, `SurfFeaturesFinder2` and `OrbFeaturesFinder2`
* stitching: add tests for parallel feature find API
* perf test (about linear speed up)
* accuracy test compares results with serial version
* stitching: use dynamic_cast to overcome ABI issues
adding parallel API to FeaturesFinder breaks ABI. This commit uses dynamic_cast and hardcodes thread-safe finders to avoid breaking ABI.
This should be replaced by proper method similar to FeaturesMatcher on next ABI break.
* use estimateAffinePartial2D in AffineBestOf2NearestMatcher
* add constructor to AffineBestOf2NearestMatcher
* allows to choose between full affine transform and partial affine transform. Other params are the as for BestOf2NearestMatcher
* added protected field
* samples: stitching_detailed support affine estimator and matcher
* added new flags to choose matcher and estimator
* stitching: rework affine matcher
represent transformation in homogeneous coordinates
affine matcher: remove duplicite code
rework flow to get rid of duplicite code
affine matcher: do not center points to (0, 0)
it is not needed for affine model. it should not affect estimation in any way.
affine matcher: remove unneeded cv namespacing
* stitching: add stub bundle adjuster
* adds stub bundle adjuster that does nothing
* can be used in place of standard bundle adjusters to omit bundle adjusting step
* samples: stitching detailed, support no budle adjust
* uses new NoBundleAdjuster
* added affine warper
* uses R to get whole affine transformation and propagates rotation and translation to plane warper
* add affine warper factory class
* affine warper: compensate transformation
* samples: stitching_detailed add support for affine warper
* add Stitcher::create method
this method follows similar constructor methods and returns smart pointer. This allows constructing Stitcher according to OpenCV guidelines.
* supports multiple stitcher configurations (PANORAMA and SCANS) for convenient setup
* returns cv::Ptr
* stitcher: dynamicaly determine correct estimator
we need to use affine estimator for affine matcher
* preserves ABI (but add hints for ABI 4)
* uses dynamic_cast hack to inject correct estimator
* sample stitching: add support for multiple modes
shows how to use different configurations of stitcher easily (panorama stitching and scans affine model)
* stitcher: find features in parallel
use new FeatureFinder API to find features in parallel. Parallelized using TBB.
* stitching: disable parallel feature finding for OCL
it does not bring much speedup to run features finder in parallel when OpenCL is enabled, because finder needs to wait for OCL device.
Also, currently ORB is not thread-safe when OCL is enabled.
* stitching: move matcher tests
move matchers tests perf_stich.cpp -> perf_matchers.cpp
* stitching: add affine stiching integration test
test basic affine stitching (SCANS mode of stitcher) with images that have only translation between them
* enable surf for stitching tests
stitching.b12 test was failing with surf
investigated the issue, surf is producing good result. Transformation is only slightly different from ORB, so that resulting pano does not exactly match ORB's result. That caused sanity check to fail.
* added size checks similar to other tests
* sanity check will be applied only for ORB
* stitching: fix wrong estimator choice
if case was exactly wrong, estimators were chosen wrong
added logging for estimated transformation
* enable surf for matchers stitching tests
* enable SURF
* rework sanity checking. Check estimated transform instead of matches. Est. transform should be more stable and comparable between SURF and ORB.
* remove regression checking for VectorFeatures tests. It has a lot if data andtest is the same as previous except it test different vector size for performance, so sanity checking does not add any value here. Added basic sanity asserts instead.
* stitching tests: allow relative error for transform
* allows .01 relative error for estimated homography sanity check in stitching matchers tests
* fix VS warning
stitching tests: increase relative error
increase relative error to make it pass on all platforms (results are still good).
stitching test: allow bigger relative error
transformation can differ in small values (with small absolute difference, but large relative difference). transformation output still looks usable for all platforms. This difference affects only mac and windows, linux passes fine with small difference.
* stitching: add tests for affine matcher
uses s1, s2 images. added also new sanity data.
* stitching tests: use different data for matchers tests
this data should yeild more stable transformation (it has much more matches, especially for surf). Sanity data regenerated.
* stitching test: rework tests for matchers
* separated rotation and translations as they are different by scale.
* use appropriate absolute error for them separately. (relative error does not work for values near zero.)
* stitching: fix affine warper compensation
calculation of rotation and translation extracted for plane warper was wrong
* stitching test: enable surf for opencl integration tests
* enable SURF with correct guard (HAVE_OPENCV_XFEATURES2D)
* add OPENCL guard and correct namespace as usual for opencl tests
* stitching: add ocl accuracy test for affine warper
test consistent results with ocl on and off
* stitching: add affine warper ocl perf test
add affine warper to existing warper perf tests. Added new sanity data.
* stitching: do not overwrite inliers in affine matcher
* estimation is run second time on inliers only, inliers produces in second run will not be therefore correct for all matches
* calib3d: add Levenberg–Marquardt refining to estimateAffine2D* functions
this adds affine Levenberg–Marquardt refining to estimateAffine2D functions similar to what is done in findHomography.
implements Levenberg–Marquardt refinig for both full affine and partial affine transformations.
* stitching: remove reestimation step in affine matcher
reestimation step is not needed. estimateAffine2D* functions are running their own reestimation on inliers using the Levenberg-Marquardt algorithm, which is better than simply rerunning RANSAC on inliers.
* implement partial affine bundle adjuster
bundle adjuster that expect affine transform with 4DOF. Refines parameters for all cameras together.
stitching: fix bug in BundleAdjusterAffinePartial
* use the invers properly
* use static buffer for invers to speed it up
* samples: add affine bundle adjuster option to stitching_detailed
* add support for using affine bundle adjuster with 4DOF
* improve logging of initial intristics
* sttiching: add affine bundle adjuster test
* fix build warnings
* stitching: increase limit on sanity check
prevents spurious test failures on mac. values are still pretty fine.
* stitching: set affine bundle adjuster for SCANS mode
* fix bug with AffineBestOf2NearestMatcher (we want to select affine partial mode)
* select right bundle adjuster
* stitching: increase error bound for matcher tests
* this prevents failure on mac. tranformation is still ok.
* stitching: implement affine bundle adjuster
* implements affine bundle adjuster that is using full affine transform
* existing test case modified to test both affinePartial an full affine bundle adjuster
* add stitching tutorial
* show basic usage of stitching api (Stitcher class)
* stitching: add more integration test for affine stitching
* added new datasets to existing testcase
* removed unused include
* calib3d: move `haveCollinearPoints` to common header
* added comment to make that this also checks too close points
* calib3d: redone checkSubset for estimateAffine* callback
* use common function to check collinearity
* this also ensures that point will not be too close to each other
* calib3d: change estimateAffine* functions API
* more similar to `findHomography`, `findFundamentalMat`, `findEssentialMat` and similar
* follows standard recommended semantic INPUTS, OUTPUTS, FLAGS
* allows to disable refining
* supported LMEDS robust method (tests yet to come) along with RANSAC
* extended docs with some tips
* calib3d: rewrite estimateAffine2D test
* rewrite in googletest style
* parametrize to test both robust methods (RANSAC and LMEDS)
* get rid of boilerplate
* calib3d: rework estimateAffinePartial2D test
* rework in googletest style
* add testing for LMEDS
* calib3d: rework estimateAffine*2D perf test
* test for LMEDS speed
* test with/without Levenberg-Marquart
* remove sanity checking (this is covered by accuracy tests)
* calib3d: improve estimateAffine*2D tests
* test transformations in loop
* improves test by testing more potential transformations
* calib3d: rewrite kernels for estimateAffine*2D functions
* use analytical solution instead of SVD
* this version is faster especially for smaller amount of points
* calib3d: tune up perf of estimateAffine*2D functions
* avoid copying inliers
* avoid converting input points if not necessary
* check only `from` point for collinearity, as `to` does not affect stability of transform
* tutorials: add commands examples to stitching tutorials
* add some examples how to run stitcher sample code
* mention stitching_detailed.cpp
* calib3d: change computeError for estimateAffine*2D
* do error computing in floats instead of doubles
this have required precision + we were storing the result in float anyway. This make code faster and allows auto-vectorization by smart compilers.
* documentation: mention estimateAffine*2D function
* refer to new functions on appropriate places
* prefer estimateAffine*2D over estimateRigidTransform
* stitching: add camera models documentations
* mention camera models in module documentation to give user a better overview and reduce confusion
2016-10-23 00:10:42 +08:00
|
|
|
void FeaturesFinder::operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features)
{
    // Find features for every image, running the concrete finder in
    // parallel when it reports itself thread safe, serially otherwise.
    size_t count = images.total();
    features.resize(count);

    FindFeaturesBody runner(*this, images, features, NULL);
    const Range whole_range(0, static_cast<int>(count));
    if (isThreadSafe())
        parallel_for_(whole_range, runner);
    else
        runner(whole_range);
}
|
|
|
|
|
|
|
|
|
|
|
|
void FeaturesFinder::operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features,
                                 const std::vector<std::vector<cv::Rect> > &rois)
{
    // ROI-restricted variant: one ROI list per image is required.
    CV_Assert(rois.size() == images.total());
    size_t count = images.total();
    features.resize(count);

    // Parallel when the concrete finder is thread safe, serial otherwise.
    FindFeaturesBody runner(*this, images, features, &rois);
    const Range whole_range(0, static_cast<int>(count));
    if (isThreadSafe())
        parallel_for_(whole_range, runner);
    else
        runner(whole_range);
}
|
|
|
|
|
|
|
|
|
|
|
|
bool FeaturesFinder::isThreadSafe() const
|
|
|
|
{
|
2017-11-24 22:34:02 +08:00
|
|
|
#ifdef HAVE_OPENCL
|
|
|
|
if (ocl::isOpenCLActivated())
|
Merge pull request #6933 from hrnr:gsoc_all
[GSOC] New camera model for stitching pipeline
* implement estimateAffine2D
estimates affine transformation using robust RANSAC method.
* uses RANSAC framework in calib3d
* includes accuracy test
* uses SVD decomposition for solving 3 point equation
* implement estimateAffinePartial2D
estimates limited affine transformation
* includes accuracy test
* stitching: add affine matcher
initial version of matcher that estimates affine transformation
* stitching: added affine transform estimator
initial version of estimator that simply chain transformations in homogeneous coordinates
* calib3d: rename estimateAffine3D test
test Calib3d_EstimateAffineTransform rename to Calib3d_EstimateAffine3D. This is more descriptive and prevents confusion with estimateAffine2D tests.
* added perf test for estimateAffine functions
tests both estimateAffine2D and estimateAffinePartial2D
* calib3d: compare error in square in estimateAffine2D
* incorporates fix from #6768
* rerun affine estimation on inliers
* stitching: new API for parallel feature finding
due to ABI breakage new functionality is added to `FeaturesFinder2`, `SurfFeaturesFinder2` and `OrbFeaturesFinder2`
* stitching: add tests for parallel feature find API
* perf test (about linear speed up)
* accuracy test compares results with serial version
* stitching: use dynamic_cast to overcome ABI issues
adding parallel API to FeaturesFinder breaks ABI. This commit uses dynamic_cast and hardcodes thread-safe finders to avoid breaking ABI.
This should be replaced by proper method similar to FeaturesMatcher on next ABI break.
* use estimateAffinePartial2D in AffineBestOf2NearestMatcher
* add constructor to AffineBestOf2NearestMatcher
* allows to choose between full affine transform and partial affine transform. Other params are the as for BestOf2NearestMatcher
* added protected field
* samples: stitching_detailed support affine estimator and matcher
* added new flags to choose matcher and estimator
* stitching: rework affine matcher
represent transformation in homogeneous coordinates
affine matcher: remove duplicite code
rework flow to get rid of duplicite code
affine matcher: do not center points to (0, 0)
it is not needed for affine model. it should not affect estimation in any way.
affine matcher: remove unneeded cv namespacing
* stitching: add stub bundle adjuster
* adds stub bundle adjuster that does nothing
* can be used in place of standard bundle adjusters to omit bundle adjusting step
* samples: stitching detailed, support no budle adjust
* uses new NoBundleAdjuster
* added affine warper
* uses R to get whole affine transformation and propagates rotation and translation to plane warper
* add affine warper factory class
* affine warper: compensate transformation
* samples: stitching_detailed add support for affine warper
* add Stitcher::create method
this method follows similar constructor methods and returns smart pointer. This allows constructing Stitcher according to OpenCV guidelines.
* supports multiple stitcher configurations (PANORAMA and SCANS) for convenient setup
* returns cv::Ptr
* stitcher: dynamicaly determine correct estimator
we need to use affine estimator for affine matcher
* preserves ABI (but add hints for ABI 4)
* uses dynamic_cast hack to inject correct estimator
* sample stitching: add support for multiple modes
shows how to use different configurations of stitcher easily (panorama stitching and scans affine model)
* stitcher: find features in parallel
use new FeatureFinder API to find features in parallel. Parallelized using TBB.
* stitching: disable parallel feature finding for OCL
it does not bring much speedup to run features finder in parallel when OpenCL is enabled, because finder needs to wait for OCL device.
Also, currently ORB is not thread-safe when OCL is enabled.
* stitching: move matcher tests
move matchers tests perf_stich.cpp -> perf_matchers.cpp
* stitching: add affine stiching integration test
test basic affine stitching (SCANS mode of stitcher) with images that have only translation between them
* enable surf for stitching tests
stitching.b12 test was failing with surf
investigated the issue, surf is producing good result. Transformation is only slightly different from ORB, so that resulting pano does not exactly match ORB's result. That caused sanity check to fail.
* added size checks similar to other tests
* sanity check will be applied only for ORB
* stitching: fix wrong estimator choice
if case was exactly wrong, estimators were chosen wrong
added logging for estimated transformation
* enable surf for matchers stitching tests
* enable SURF
* rework sanity checking. Check estimated transform instead of matches. Est. transform should be more stable and comparable between SURF and ORB.
* remove regression checking for VectorFeatures tests. It has a lot if data andtest is the same as previous except it test different vector size for performance, so sanity checking does not add any value here. Added basic sanity asserts instead.
* stitching tests: allow relative error for transform
* allows .01 relative error for estimated homography sanity check in stitching matchers tests
* fix VS warning
stitching tests: increase relative error
increase relative error to make it pass on all platforms (results are still good).
stitching test: allow bigger relative error
transformation can differ in small values (with small absolute difference, but large relative difference). transformation output still looks usable for all platforms. This difference affects only mac and windows, linux passes fine with small difference.
* stitching: add tests for affine matcher
uses s1, s2 images. added also new sanity data.
* stitching tests: use different data for matchers tests
this data should yeild more stable transformation (it has much more matches, especially for surf). Sanity data regenerated.
* stitching test: rework tests for matchers
* separated rotation and translations as they are different by scale.
* use appropriate absolute error for them separately. (relative error does not work for values near zero.)
* stitching: fix affine warper compensation
calculation of rotation and translation extracted for plane warper was wrong
* stitching test: enable surf for opencl integration tests
* enable SURF with correct guard (HAVE_OPENCV_XFEATURES2D)
* add OPENCL guard and correct namespace as usual for opencl tests
* stitching: add ocl accuracy test for affine warper
test consistent results with ocl on and off
* stitching: add affine warper ocl perf test
add affine warper to existing warper perf tests. Added new sanity data.
* stitching: do not overwrite inliers in affine matcher
* estimation is run second time on inliers only, inliers produces in second run will not be therefore correct for all matches
* calib3d: add Levenberg–Marquardt refining to estimateAffine2D* functions
this adds affine Levenberg–Marquardt refining to estimateAffine2D functions similar to what is done in findHomography.
implements Levenberg–Marquardt refinig for both full affine and partial affine transformations.
* stitching: remove reestimation step in affine matcher
reestimation step is not needed. estimateAffine2D* functions are running their own reestimation on inliers using the Levenberg-Marquardt algorithm, which is better than simply rerunning RANSAC on inliers.
* implement partial affine bundle adjuster
bundle adjuster that expect affine transform with 4DOF. Refines parameters for all cameras together.
stitching: fix bug in BundleAdjusterAffinePartial
* use the invers properly
* use static buffer for invers to speed it up
* samples: add affine bundle adjuster option to stitching_detailed
* add support for using affine bundle adjuster with 4DOF
* improve logging of initial intristics
* sttiching: add affine bundle adjuster test
* fix build warnings
* stitching: increase limit on sanity check
prevents spurious test failures on mac. values are still pretty fine.
* stitching: set affine bundle adjuster for SCANS mode
* fix bug with AffineBestOf2NearestMatcher (we want to select affine partial mode)
* select right bundle adjuster
* stitching: increase error bound for matcher tests
* this prevents failure on mac. tranformation is still ok.
* stitching: implement affine bundle adjuster
* implements affine bundle adjuster that is using full affine transform
* existing test case modified to test both affinePartial an full affine bundle adjuster
* add stitching tutorial
* show basic usage of stitching api (Stitcher class)
* stitching: add more integration test for affine stitching
* added new datasets to existing testcase
* removed unused include
* calib3d: move `haveCollinearPoints` to common header
* added comment to make that this also checks too close points
* calib3d: redone checkSubset for estimateAffine* callback
* use common function to check collinearity
* this also ensures that point will not be too close to each other
* calib3d: change estimateAffine* functions API
* more similar to `findHomography`, `findFundamentalMat`, `findEssentialMat` and similar
* follows standard recommended semantic INPUTS, OUTPUTS, FLAGS
* allows to disable refining
* supported LMEDS robust method (tests yet to come) along with RANSAC
* extended docs with some tips
* calib3d: rewrite estimateAffine2D test
* rewrite in googletest style
* parametrize to test both robust methods (RANSAC and LMEDS)
* get rid of boilerplate
* calib3d: rework estimateAffinePartial2D test
* rework in googletest style
* add testing for LMEDS
* calib3d: rework estimateAffine*2D perf test
* test for LMEDS speed
* test with/without Levenberg-Marquart
* remove sanity checking (this is covered by accuracy tests)
* calib3d: improve estimateAffine*2D tests
* test transformations in loop
* improves test by testing more potential transformations
* calib3d: rewrite kernels for estimateAffine*2D functions
* use analytical solution instead of SVD
* this version is faster especially for smaller amount of points
* calib3d: tune up perf of estimateAffine*2D functions
* avoid copying inliers
* avoid converting input points if not necessary
* check only `from` point for collinearity, as `to` does not affect stability of transform
* tutorials: add commands examples to stitching tutorials
* add some examples how to run stitcher sample code
* mention stitching_detailed.cpp
* calib3d: change computeError for estimateAffine*2D
* do error computing in floats instead of doubles
this have required precision + we were storing the result in float anyway. This make code faster and allows auto-vectorization by smart compilers.
* documentation: mention estimateAffine*2D function
* refer to new functions on appropriate places
* prefer estimateAffine*2D over estimateRigidTransform
* stitching: add camera models documentations
* mention camera models in module documentation to give user a better overview and reduce confusion
2016-10-23 00:10:42 +08:00
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
2017-11-24 22:34:02 +08:00
|
|
|
#endif
|
Merge pull request #6933 from hrnr:gsoc_all
[GSOC] New camera model for stitching pipeline
* implement estimateAffine2D
estimates affine transformation using robust RANSAC method.
* uses RANSAC framework in calib3d
* includes accuracy test
* uses SVD decomposition for solving 3 point equation
* implement estimateAffinePartial2D
estimates limited affine transformation
* includes accuracy test
* stitching: add affine matcher
initial version of matcher that estimates affine transformation
* stitching: added affine transform estimator
initial version of estimator that simply chain transformations in homogeneous coordinates
* calib3d: rename estimateAffine3D test
test Calib3d_EstimateAffineTransform rename to Calib3d_EstimateAffine3D. This is more descriptive and prevents confusion with estimateAffine2D tests.
* added perf test for estimateAffine functions
tests both estimateAffine2D and estimateAffinePartial2D
* calib3d: compare error in square in estimateAffine2D
* incorporates fix from #6768
* rerun affine estimation on inliers
* stitching: new API for parallel feature finding
due to ABI breakage new functionality is added to `FeaturesFinder2`, `SurfFeaturesFinder2` and `OrbFeaturesFinder2`
* stitching: add tests for parallel feature find API
* perf test (about linear speed up)
* accuracy test compares results with serial version
* stitching: use dynamic_cast to overcome ABI issues
adding parallel API to FeaturesFinder breaks ABI. This commit uses dynamic_cast and hardcodes thread-safe finders to avoid breaking ABI.
This should be replaced by proper method similar to FeaturesMatcher on next ABI break.
* use estimateAffinePartial2D in AffineBestOf2NearestMatcher
* add constructor to AffineBestOf2NearestMatcher
* allows to choose between full affine transform and partial affine transform. Other params are the as for BestOf2NearestMatcher
* added protected field
* samples: stitching_detailed support affine estimator and matcher
* added new flags to choose matcher and estimator
* stitching: rework affine matcher
represent transformation in homogeneous coordinates
affine matcher: remove duplicite code
rework flow to get rid of duplicite code
affine matcher: do not center points to (0, 0)
it is not needed for affine model. it should not affect estimation in any way.
affine matcher: remove unneeded cv namespacing
* stitching: add stub bundle adjuster
* adds stub bundle adjuster that does nothing
* can be used in place of standard bundle adjusters to omit bundle adjusting step
* samples: stitching detailed, support no budle adjust
* uses new NoBundleAdjuster
* added affine warper
* uses R to get whole affine transformation and propagates rotation and translation to plane warper
* add affine warper factory class
* affine warper: compensate transformation
* samples: stitching_detailed add support for affine warper
* add Stitcher::create method
this method follows similar constructor methods and returns smart pointer. This allows constructing Stitcher according to OpenCV guidelines.
* supports multiple stitcher configurations (PANORAMA and SCANS) for convenient setup
* returns cv::Ptr
* stitcher: dynamicaly determine correct estimator
we need to use affine estimator for affine matcher
* preserves ABI (but add hints for ABI 4)
* uses dynamic_cast hack to inject correct estimator
* sample stitching: add support for multiple modes
shows how to use different configurations of stitcher easily (panorama stitching and scans affine model)
* stitcher: find features in parallel
use new FeatureFinder API to find features in parallel. Parallelized using TBB.
* stitching: disable parallel feature finding for OCL
it does not bring much speedup to run features finder in parallel when OpenCL is enabled, because finder needs to wait for OCL device.
Also, currently ORB is not thread-safe when OCL is enabled.
* stitching: move matcher tests
move matchers tests perf_stich.cpp -> perf_matchers.cpp
* stitching: add affine stitching integration test
test basic affine stitching (SCANS mode of stitcher) with images that have only translation between them
* enable surf for stitching tests
stitching.b12 test was failing with surf
investigated the issue, surf is producing good result. Transformation is only slightly different from ORB, so that resulting pano does not exactly match ORB's result. That caused sanity check to fail.
* added size checks similar to other tests
* sanity check will be applied only for ORB
* stitching: fix wrong estimator choice
if case was exactly wrong, estimators were chosen wrong
added logging for estimated transformation
* enable surf for matchers stitching tests
* enable SURF
* rework sanity checking. Check estimated transform instead of matches. Est. transform should be more stable and comparable between SURF and ORB.
* remove regression checking for VectorFeatures tests. It has a lot of data and the test is the same as the previous one except it tests different vector sizes for performance, so sanity checking does not add any value here. Added basic sanity asserts instead.
* stitching tests: allow relative error for transform
* allows .01 relative error for estimated homography sanity check in stitching matchers tests
* fix VS warning
stitching tests: increase relative error
increase relative error to make it pass on all platforms (results are still good).
stitching test: allow bigger relative error
transformation can differ in small values (with small absolute difference, but large relative difference). transformation output still looks usable for all platforms. This difference affects only mac and windows, linux passes fine with small difference.
* stitching: add tests for affine matcher
uses s1, s2 images. added also new sanity data.
* stitching tests: use different data for matchers tests
this data should yield a more stable transformation (it has many more matches, especially for surf). Sanity data regenerated.
* stitching test: rework tests for matchers
* separated rotation and translations as they are different by scale.
* use appropriate absolute error for them separately. (relative error does not work for values near zero.)
* stitching: fix affine warper compensation
calculation of rotation and translation extracted for plane warper was wrong
* stitching test: enable surf for opencl integration tests
* enable SURF with correct guard (HAVE_OPENCV_XFEATURES2D)
* add OPENCL guard and correct namespace as usual for opencl tests
* stitching: add ocl accuracy test for affine warper
test consistent results with ocl on and off
* stitching: add affine warper ocl perf test
add affine warper to existing warper perf tests. Added new sanity data.
* stitching: do not overwrite inliers in affine matcher
* estimation is run second time on inliers only, inliers produces in second run will not be therefore correct for all matches
* calib3d: add Levenberg–Marquardt refining to estimateAffine2D* functions
this adds affine Levenberg–Marquardt refining to estimateAffine2D functions similar to what is done in findHomography.
implements Levenberg–Marquardt refining for both full affine and partial affine transformations.
* stitching: remove reestimation step in affine matcher
reestimation step is not needed. estimateAffine2D* functions are running their own reestimation on inliers using the Levenberg-Marquardt algorithm, which is better than simply rerunning RANSAC on inliers.
* implement partial affine bundle adjuster
bundle adjuster that expect affine transform with 4DOF. Refines parameters for all cameras together.
stitching: fix bug in BundleAdjusterAffinePartial
* use the inverse properly
* use a static buffer for the inverse to speed it up
* samples: add affine bundle adjuster option to stitching_detailed
* add support for using affine bundle adjuster with 4DOF
* improve logging of initial intrinsics
* stitching: add affine bundle adjuster test
* fix build warnings
* stitching: increase limit on sanity check
prevents spurious test failures on mac. values are still pretty fine.
* stitching: set affine bundle adjuster for SCANS mode
* fix bug with AffineBestOf2NearestMatcher (we want to select affine partial mode)
* select right bundle adjuster
* stitching: increase error bound for matcher tests
* this prevents failure on mac. transformation is still ok.
* stitching: implement affine bundle adjuster
* implements affine bundle adjuster that is using full affine transform
* existing test case modified to test both affinePartial an full affine bundle adjuster
* add stitching tutorial
* show basic usage of stitching api (Stitcher class)
* stitching: add more integration test for affine stitching
* added new datasets to existing testcase
* removed unused include
* calib3d: move `haveCollinearPoints` to common header
* added comment to make that this also checks too close points
* calib3d: redone checkSubset for estimateAffine* callback
* use common function to check collinearity
* this also ensures that point will not be too close to each other
* calib3d: change estimateAffine* functions API
* more similar to `findHomography`, `findFundamentalMat`, `findEssentialMat` and similar
* follows standard recommended semantic INPUTS, OUTPUTS, FLAGS
* allows to disable refining
* supported LMEDS robust method (tests yet to come) along with RANSAC
* extended docs with some tips
* calib3d: rewrite estimateAffine2D test
* rewrite in googletest style
* parametrize to test both robust methods (RANSAC and LMEDS)
* get rid of boilerplate
* calib3d: rework estimateAffinePartial2D test
* rework in googletest style
* add testing for LMEDS
* calib3d: rework estimateAffine*2D perf test
* test for LMEDS speed
* test with/without Levenberg-Marquart
* remove sanity checking (this is covered by accuracy tests)
* calib3d: improve estimateAffine*2D tests
* test transformations in loop
* improves test by testing more potential transformations
* calib3d: rewrite kernels for estimateAffine*2D functions
* use analytical solution instead of SVD
* this version is faster especially for smaller amount of points
* calib3d: tune up perf of estimateAffine*2D functions
* avoid copying inliers
* avoid converting input points if not necessary
* check only `from` point for collinearity, as `to` does not affect stability of transform
* tutorials: add commands examples to stitching tutorials
* add some examples how to run stitcher sample code
* mention stitching_detailed.cpp
* calib3d: change computeError for estimateAffine*2D
* do error computing in floats instead of doubles
this have required precision + we were storing the result in float anyway. This make code faster and allows auto-vectorization by smart compilers.
* documentation: mention estimateAffine*2D function
* refer to new functions on appropriate places
* prefer estimateAffine*2D over estimateRigidTransform
* stitching: add camera models documentations
* mention camera models in module documentation to give user a better overview and reduce confusion
2016-10-23 00:10:42 +08:00
|
|
|
if (dynamic_cast<const SurfFeaturesFinder*>(this))
|
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
else if (dynamic_cast<const OrbFeaturesFinder*>(this))
|
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
// Configure a SURF-based feature finder.
// If the descriptor pyramid parameters equal the detector ones, a single SURF
// instance ('surf') performs both detection and description; otherwise two
// separately tuned instances are stored in 'detector_' and 'extractor_'.
SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int num_layers,
                                       int num_octaves_descr, int num_layers_descr)
{
#ifdef HAVE_OPENCV_XFEATURES2D
    const bool same_pyramid = (num_octaves_descr == num_octaves) && (num_layers_descr == num_layers);
    if (same_pyramid)
    {
        Ptr<SURF> combined = SURF::create();
        if (!combined)
            CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
        combined->setHessianThreshold(hess_thresh);
        combined->setNOctaves(num_octaves);
        combined->setNOctaveLayers(num_layers);
        surf = combined;
    }
    else
    {
        Ptr<SURF> det = SURF::create();
        Ptr<SURF> ext = SURF::create();
        if (!det || !ext)
            CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );

        // Detector uses the detection pyramid parameters...
        det->setHessianThreshold(hess_thresh);
        det->setNOctaves(num_octaves);
        det->setNOctaveLayers(num_layers);

        // ...while the extractor gets its own pyramid settings.
        ext->setNOctaves(num_octaves_descr);
        ext->setNOctaveLayers(num_layers_descr);

        detector_ = det;
        extractor_ = ext;
    }
#else
    CV_UNUSED(hess_thresh);
    CV_UNUSED(num_octaves);
    CV_UNUSED(num_layers);
    CV_UNUSED(num_octaves_descr);
    CV_UNUSED(num_layers_descr);
    CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
#endif
}
|
|
|
|
|
2014-02-14 19:36:04 +08:00
|
|
|
// Detect SURF keypoints and compute descriptors for one image.
// Accepts 8-bit BGR or grayscale input; BGR is converted to gray first.
void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));

    UMat gray_image;
    if (image.type() == CV_8UC1)
        gray_image = image.getUMat();
    else
        cvtColor(image, gray_image, COLOR_BGR2GRAY);

    if (surf)
    {
        // Combined detector/extractor path: one call yields both outputs;
        // reshape gives one descriptor row per keypoint.
        UMat descriptors;
        surf->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors);
        features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
    }
    else
    {
        // Separate detector/extractor path (distinct pyramid settings).
        detector_->detect(gray_image, features.keypoints);
        extractor_->compute(gray_image, features.keypoints, features.descriptors);
    }
}
|
|
|
|
|
2018-08-02 15:58:49 +08:00
|
|
|
// Construct a SIFT-based finder; fails with StsNotImplemented when the
// xfeatures2d module (which provides SIFT here) is not available.
SiftFeaturesFinder::SiftFeaturesFinder()
{
#ifdef HAVE_OPENCV_XFEATURES2D
    Ptr<SIFT> created = SIFT::create();
    if (!created)
        CV_Error( Error::StsNotImplemented, "OpenCV was built without SIFT support" );
    sift = created;
#else
    CV_Error( Error::StsNotImplemented, "OpenCV was built without SIFT support" );
#endif
}
|
|
|
|
|
|
|
|
// Detect SIFT keypoints and compute descriptors for one image.
// Accepts 8-bit BGR or grayscale input; BGR is converted to gray first.
void SiftFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));

    UMat gray_image;
    if (image.type() == CV_8UC1)
        gray_image = image.getUMat();
    else
        cvtColor(image, gray_image, COLOR_BGR2GRAY);

    UMat descriptors;
    sift->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors);
    // One descriptor row per keypoint.
    features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
}
|
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
// Configure an ORB-based finder. When a grid is used, the total feature
// budget is split across cells; the (99 + cells)/100 factor rounds the
// per-cell budget up slightly so the grid total does not fall short of n_features.
OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
{
    grid_size = _grid_size;
    const int cells = grid_size.area();
    const int features_per_cell = n_features * (99 + cells) / 100 / cells;
    orb = ORB::create(features_per_cell, scaleFactor, nlevels);
}
|
|
|
|
|
2014-02-14 19:36:04 +08:00
|
|
|
// Detect ORB keypoints and compute descriptors for one image.
// Accepts 8-bit BGR, BGRA or grayscale input. When grid_size has more than
// one cell, detection runs independently per cell and results are merged,
// which spreads keypoints more evenly over the image.
void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC4) || (image.type() == CV_8UC1));

    UMat gray_image;
    switch (image.type())
    {
    case CV_8UC3:
        cvtColor(image, gray_image, COLOR_BGR2GRAY);
        break;
    case CV_8UC4:
        cvtColor(image, gray_image, COLOR_BGRA2GRAY);
        break;
    case CV_8UC1:
        gray_image = image.getUMat();
        break;
    default:
        CV_Error(Error::StsUnsupportedFormat, "");
    }

    if (grid_size.area() == 1)
    {
        // No grid: a single whole-image pass.
        orb->detectAndCompute(gray_image, Mat(), features.keypoints, features.descriptors);
        return;
    }

    features.keypoints.clear();
    features.descriptors.release();

    std::vector<KeyPoint> cell_points;
    Mat merged_descriptors;
    UMat cell_descriptors;

    for (int r = 0; r < grid_size.height; ++r)
    {
        for (int c = 0; c < grid_size.width; ++c)
        {
            // Integer cell bounds; consecutive cells tile the image exactly.
            const int xl = c * gray_image.cols / grid_size.width;
            const int yl = r * gray_image.rows / grid_size.height;
            const int xr = (c + 1) * gray_image.cols / grid_size.width;
            const int yr = (r + 1) * gray_image.rows / grid_size.height;

            UMat cell = gray_image(Range(yl, yr), Range(xl, xr));
            orb->detectAndCompute(cell, UMat(), cell_points, cell_descriptors);

            // Shift keypoints from cell-local to whole-image coordinates.
            features.keypoints.reserve(features.keypoints.size() + cell_points.size());
            for (std::vector<KeyPoint>::iterator kp = cell_points.begin(); kp != cell_points.end(); ++kp)
            {
                kp->pt.x += xl;
                kp->pt.y += yl;
                features.keypoints.push_back(*kp);
            }
            merged_descriptors.push_back(cell_descriptors.getMat(ACCESS_READ));
        }
    }

    // TODO optimize copyTo()
    //features.descriptors = merged_descriptors.getUMat(ACCESS_READ);
    merged_descriptors.copyTo(features.descriptors);
}
|
|
|
|
|
2016-10-06 21:40:05 +08:00
|
|
|
// Configure an AKAZE-based finder; all parameters are forwarded
// unchanged to AKAZE::create().
AKAZEFeaturesFinder::AKAZEFeaturesFinder(int descriptor_type,
                                         int descriptor_size,
                                         int descriptor_channels,
                                         float threshold,
                                         int nOctaves,
                                         int nOctaveLayers,
                                         int diffusivity)
{
    akaze = AKAZE::create(descriptor_type, descriptor_size, descriptor_channels,
                          threshold, nOctaves, nOctaveLayers, diffusivity);
}
|
|
|
|
|
|
|
|
// Detect AKAZE keypoints and compute descriptors for one image.
// Accepts 8-bit BGR or grayscale input; the image is passed to AKAZE directly
// without an explicit grayscale conversion here.
void AKAZEFeaturesFinder::find(InputArray image, detail::ImageFeatures &features)
{
    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
    akaze->detectAndCompute(image, noArray(), features.keypoints, features.descriptors);
}
|
|
|
|
|
2014-08-22 21:33:24 +08:00
|
|
|
#ifdef HAVE_OPENCV_XFEATURES2D
|
2012-10-17 15:12:04 +08:00
|
|
|
// Configure the CUDA SURF finder. Detector and descriptor pyramid
// parameters are stored separately and applied in find(), which runs
// the two stages with different settings.
SurfFeaturesFinderGpu::SurfFeaturesFinderGpu(double hess_thresh, int num_octaves, int num_layers,
                                             int num_octaves_descr, int num_layers_descr)
{
    // Fixed CUDA SURF settings.
    surf_.keypointsRatio = 0.1f;
    surf_.hessianThreshold = hess_thresh;
    surf_.extended = false;

    // Per-stage pyramid parameters, applied later in find().
    num_octaves_ = num_octaves;
    num_layers_ = num_layers;
    num_octaves_descr_ = num_octaves_descr;
    num_layers_descr_ = num_layers_descr;
}
|
|
|
|
|
|
|
|
|
2014-02-14 19:36:04 +08:00
|
|
|
// Detect SURF keypoints and compute descriptors on the GPU.
// NOTE(review): the assert only checks depth, but cvtColor(..., COLOR_BGR2GRAY)
// below assumes a 3-channel input -- confirm callers never pass grayscale here.
void SurfFeaturesFinderGpu::find(InputArray image, ImageFeatures &features)
{
    CV_Assert(image.depth() == CV_8U);

    // Upload into reusable device buffers (kept across calls, freed by collectGarbage()).
    ensureSizeIsEnough(image.size(), image.type(), image_);
    image_.upload(image);

    ensureSizeIsEnough(image.size(), CV_8UC1, gray_image_);

#ifdef HAVE_OPENCV_CUDAIMGPROC
    cv::cuda::cvtColor(image_, gray_image_, COLOR_BGR2GRAY);
#else
    cvtColor(image_, gray_image_, COLOR_BGR2GRAY);
#endif

    // Stage 1: detection with the detector pyramid parameters, orientation on.
    surf_.nOctaves = num_octaves_;
    surf_.nOctaveLayers = num_layers_;
    surf_.upright = false;
    surf_(gray_image_, GpuMat(), keypoints_);

    // Stage 2: description with the descriptor pyramid parameters, upright mode,
    // reusing the keypoints found above (last argument = true).
    surf_.nOctaves = num_octaves_descr_;
    surf_.nOctaveLayers = num_layers_descr_;
    surf_.upright = true;
    surf_(gray_image_, GpuMat(), keypoints_, descriptors_, true);
    surf_.downloadKeypoints(keypoints_, features.keypoints);

    descriptors_.download(features.descriptors);
}
|
|
|
|
|
|
|
|
void SurfFeaturesFinderGpu::collectGarbage()
|
|
|
|
{
|
|
|
|
surf_.releaseMemory();
|
|
|
|
image_.release();
|
|
|
|
gray_image_.release();
|
|
|
|
keypoints_.release();
|
|
|
|
descriptors_.release();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
// Default state: invalid (-1) image indices, no inliers, zero confidence.
MatchesInfo::MatchesInfo() : src_img_idx(-1), dst_img_idx(-1), num_inliers(0), confidence(0) {}
|
|
|
|
|
|
|
|
// Delegates to operator= so copy logic lives in one place (H is deep-copied there).
MatchesInfo::MatchesInfo(const MatchesInfo &other) { *this = other; }
|
|
|
|
|
2017-09-05 22:10:16 +08:00
|
|
|
// Member-wise copy assignment. H is clone()d so the two MatchesInfo objects
// never share homography storage (Mat assignment alone would share data).
// A self-assignment guard skips the needless deep copy when assigning to self.
MatchesInfo& MatchesInfo::operator =(const MatchesInfo &other)
{
    if (this != &other)
    {
        src_img_idx = other.src_img_idx;
        dst_img_idx = other.dst_img_idx;
        matches = other.matches;
        inliers_mask = other.inliers_mask;
        num_inliers = other.num_inliers;
        H = other.H.clone();  // deep copy, not a shared header
        confidence = other.confidence;
    }
    return *this;
}
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
2013-02-25 00:14:01 +08:00
|
|
|
void FeaturesMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
|
2014-02-14 19:36:04 +08:00
|
|
|
const UMat &mask)
|
2012-10-17 15:12:04 +08:00
|
|
|
{
|
|
|
|
const int num_images = static_cast<int>(features.size());
|
|
|
|
|
|
|
|
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows));
|
2014-02-14 19:36:04 +08:00
|
|
|
Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
|
2012-10-17 15:12:04 +08:00
|
|
|
if (mask_.empty())
|
|
|
|
mask_ = Mat::ones(num_images, num_images, CV_8U);
|
|
|
|
|
2013-02-25 00:14:01 +08:00
|
|
|
std::vector<std::pair<int,int> > near_pairs;
|
2012-10-17 15:12:04 +08:00
|
|
|
for (int i = 0; i < num_images - 1; ++i)
|
|
|
|
for (int j = i + 1; j < num_images; ++j)
|
|
|
|
if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
|
2013-02-25 00:14:01 +08:00
|
|
|
near_pairs.push_back(std::make_pair(i, j));
|
2012-10-17 15:12:04 +08:00
|
|
|
|
|
|
|
pairwise_matches.resize(num_images * num_images);
|
|
|
|
MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
|
|
|
|
|
|
|
|
if (is_thread_safe_)
|
2013-05-30 22:44:33 +08:00
|
|
|
parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
|
2012-10-17 15:12:04 +08:00
|
|
|
else
|
2013-05-30 22:44:33 +08:00
|
|
|
body(Range(0, static_cast<int>(near_pairs.size())));
|
2012-10-17 15:12:04 +08:00
|
|
|
LOGLN_CHAT("");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
// Select the matching backend: a CUDA matcher when requested and a CUDA
// device is present, otherwise the CPU matcher. Thresholds control when
// homography estimation and its inlier-only refinement kick in (see match()).
BestOf2NearestMatcher::BestOf2NearestMatcher(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
{
    CV_UNUSED(try_use_gpu);  // unused when built without cudafeatures2d

#ifdef HAVE_OPENCV_CUDAFEATURES2D
    if (try_use_gpu && getCudaEnabledDeviceCount() > 0)
    {
        impl_ = makePtr<GpuMatcher>(match_conf);
    }
    else
#endif
    {
        impl_ = makePtr<CpuMatcher>(match_conf);
    }

    is_thread_safe_ = impl_->isThreadSafe();
    num_matches_thresh1_ = num_matches_thresh1;
    num_matches_thresh2_ = num_matches_thresh2;
}
|
|
|
|
|
|
|
|
|
|
|
|
void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
|
|
|
|
MatchesInfo &matches_info)
|
|
|
|
{
|
2018-09-14 05:35:26 +08:00
|
|
|
CV_INSTRUMENT_REGION();
|
2016-08-18 14:53:00 +08:00
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
(*impl_)(features1, features2, matches_info);
|
|
|
|
|
|
|
|
// Check if it makes sense to find homography
|
|
|
|
if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Construct point-point correspondences for homography estimation
|
|
|
|
Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
|
|
|
|
Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
|
|
|
|
for (size_t i = 0; i < matches_info.matches.size(); ++i)
|
|
|
|
{
|
|
|
|
const DMatch& m = matches_info.matches[i];
|
|
|
|
|
|
|
|
Point2f p = features1.keypoints[m.queryIdx].pt;
|
|
|
|
p.x -= features1.img_size.width * 0.5f;
|
|
|
|
p.y -= features1.img_size.height * 0.5f;
|
|
|
|
src_points.at<Point2f>(0, static_cast<int>(i)) = p;
|
|
|
|
|
|
|
|
p = features2.keypoints[m.trainIdx].pt;
|
|
|
|
p.x -= features2.img_size.width * 0.5f;
|
|
|
|
p.y -= features2.img_size.height * 0.5f;
|
|
|
|
dst_points.at<Point2f>(0, static_cast<int>(i)) = p;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find pair-wise motion
|
2013-04-11 23:27:54 +08:00
|
|
|
matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, RANSAC);
|
2013-03-13 02:36:00 +08:00
|
|
|
if (matches_info.H.empty() || std::abs(determinant(matches_info.H)) < std::numeric_limits<double>::epsilon())
|
2012-10-17 15:12:04 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
// Find number of inliers
|
|
|
|
matches_info.num_inliers = 0;
|
|
|
|
for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
|
|
|
|
if (matches_info.inliers_mask[i])
|
|
|
|
matches_info.num_inliers++;
|
|
|
|
|
|
|
|
// These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching
|
|
|
|
// using Invariant Features"
|
|
|
|
matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());
|
|
|
|
|
|
|
|
// Set zero confidence to remove matches between too close images, as they don't provide
|
|
|
|
// additional information anyway. The threshold was set experimentally.
|
|
|
|
matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;
|
|
|
|
|
|
|
|
// Check if we should try to refine motion
|
|
|
|
if (matches_info.num_inliers < num_matches_thresh2_)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Construct point-point correspondences for inliers only
|
|
|
|
src_points.create(1, matches_info.num_inliers, CV_32FC2);
|
|
|
|
dst_points.create(1, matches_info.num_inliers, CV_32FC2);
|
|
|
|
int inlier_idx = 0;
|
|
|
|
for (size_t i = 0; i < matches_info.matches.size(); ++i)
|
|
|
|
{
|
|
|
|
if (!matches_info.inliers_mask[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
const DMatch& m = matches_info.matches[i];
|
|
|
|
|
|
|
|
Point2f p = features1.keypoints[m.queryIdx].pt;
|
|
|
|
p.x -= features1.img_size.width * 0.5f;
|
|
|
|
p.y -= features1.img_size.height * 0.5f;
|
|
|
|
src_points.at<Point2f>(0, inlier_idx) = p;
|
|
|
|
|
|
|
|
p = features2.keypoints[m.trainIdx].pt;
|
|
|
|
p.x -= features2.img_size.width * 0.5f;
|
|
|
|
p.y -= features2.img_size.height * 0.5f;
|
|
|
|
dst_points.at<Point2f>(0, inlier_idx) = p;
|
|
|
|
|
|
|
|
inlier_idx++;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Rerun motion estimation on inliers only
|
2013-04-11 23:27:54 +08:00
|
|
|
matches_info.H = findHomography(src_points, dst_points, RANSAC);
|
2012-10-17 15:12:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void BestOf2NearestMatcher::collectGarbage()
|
|
|
|
{
|
|
|
|
impl_->collectGarbage();
|
|
|
|
}
|
|
|
|
|
2014-05-17 13:52:07 +08:00
|
|
|
|
|
|
|
// Same as BestOf2NearestMatcher, but operator() restricts matching to image
// pairs whose indices differ by less than range_width.
BestOf2NearestRangeMatcher::BestOf2NearestRangeMatcher(int range_width, bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2): BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh2)
{
    range_width_ = range_width;
}
|
|
|
|
|
|
|
|
|
|
|
|
void BestOf2NearestRangeMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
|
|
|
|
const UMat &mask)
|
|
|
|
{
|
|
|
|
const int num_images = static_cast<int>(features.size());
|
|
|
|
|
|
|
|
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows));
|
|
|
|
Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
|
|
|
|
if (mask_.empty())
|
|
|
|
mask_ = Mat::ones(num_images, num_images, CV_8U);
|
|
|
|
|
|
|
|
std::vector<std::pair<int,int> > near_pairs;
|
|
|
|
for (int i = 0; i < num_images - 1; ++i)
|
|
|
|
for (int j = i + 1; j < std::min(num_images, i + range_width_); ++j)
|
|
|
|
if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
|
|
|
|
near_pairs.push_back(std::make_pair(i, j));
|
|
|
|
|
|
|
|
pairwise_matches.resize(num_images * num_images);
|
|
|
|
MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
|
|
|
|
|
|
|
|
if (is_thread_safe_)
|
|
|
|
parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
|
|
|
|
else
|
|
|
|
body(Range(0, static_cast<int>(near_pairs.size())));
|
|
|
|
LOGLN_CHAT("");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
Merge pull request #6933 from hrnr:gsoc_all
[GSOC] New camera model for stitching pipeline
* implement estimateAffine2D
estimates affine transformation using robust RANSAC method.
* uses RANSAC framework in calib3d
* includes accuracy test
* uses SVD decomposition for solving 3 point equation
* implement estimateAffinePartial2D
estimates limited affine transformation
* includes accuracy test
* stitching: add affine matcher
initial version of matcher that estimates affine transformation
* stitching: added affine transform estimator
initial version of estimator that simply chain transformations in homogeneous coordinates
* calib3d: rename estimateAffine3D test
test Calib3d_EstimateAffineTransform rename to Calib3d_EstimateAffine3D. This is more descriptive and prevents confusion with estimateAffine2D tests.
* added perf test for estimateAffine functions
tests both estimateAffine2D and estimateAffinePartial2D
* calib3d: compare error in square in estimateAffine2D
* incorporates fix from #6768
* rerun affine estimation on inliers
* stitching: new API for parallel feature finding
due to ABI breakage new functionality is added to `FeaturesFinder2`, `SurfFeaturesFinder2` and `OrbFeaturesFinder2`
* stitching: add tests for parallel feature find API
* perf test (about linear speed up)
* accuracy test compares results with serial version
* stitching: use dynamic_cast to overcome ABI issues
adding parallel API to FeaturesFinder breaks ABI. This commit uses dynamic_cast and hardcodes thread-safe finders to avoid breaking ABI.
This should be replaced by proper method similar to FeaturesMatcher on next ABI break.
* use estimateAffinePartial2D in AffineBestOf2NearestMatcher
* add constructor to AffineBestOf2NearestMatcher
* allows to choose between full affine transform and partial affine transform. Other params are the as for BestOf2NearestMatcher
* added protected field
* samples: stitching_detailed support affine estimator and matcher
* added new flags to choose matcher and estimator
* stitching: rework affine matcher
represent transformation in homogeneous coordinates
affine matcher: remove duplicite code
rework flow to get rid of duplicite code
affine matcher: do not center points to (0, 0)
it is not needed for affine model. it should not affect estimation in any way.
affine matcher: remove unneeded cv namespacing
* stitching: add stub bundle adjuster
* adds stub bundle adjuster that does nothing
* can be used in place of standard bundle adjusters to omit bundle adjusting step
* samples: stitching detailed, support no budle adjust
* uses new NoBundleAdjuster
* added affine warper
* uses R to get whole affine transformation and propagates rotation and translation to plane warper
* add affine warper factory class
* affine warper: compensate transformation
* samples: stitching_detailed add support for affine warper
* add Stitcher::create method
this method follows similar constructor methods and returns smart pointer. This allows constructing Stitcher according to OpenCV guidelines.
* supports multiple stitcher configurations (PANORAMA and SCANS) for convenient setup
* returns cv::Ptr
* stitcher: dynamicaly determine correct estimator
we need to use affine estimator for affine matcher
* preserves ABI (but add hints for ABI 4)
* uses dynamic_cast hack to inject correct estimator
* sample stitching: add support for multiple modes
shows how to use different configurations of stitcher easily (panorama stitching and scans affine model)
* stitcher: find features in parallel
use new FeatureFinder API to find features in parallel. Parallelized using TBB.
* stitching: disable parallel feature finding for OCL
it does not bring much speedup to run features finder in parallel when OpenCL is enabled, because finder needs to wait for OCL device.
Also, currently ORB is not thread-safe when OCL is enabled.
* stitching: move matcher tests
move matchers tests perf_stich.cpp -> perf_matchers.cpp
* stitching: add affine stiching integration test
test basic affine stitching (SCANS mode of stitcher) with images that have only translation between them
* enable surf for stitching tests
stitching.b12 test was failing with surf
investigated the issue, surf is producing good result. Transformation is only slightly different from ORB, so that resulting pano does not exactly match ORB's result. That caused sanity check to fail.
* added size checks similar to other tests
* sanity check will be applied only for ORB
* stitching: fix wrong estimator choice
the if condition was exactly inverted, so the estimators were chosen wrongly
added logging for estimated transformation
* enable surf for matchers stitching tests
* enable SURF
* rework sanity checking. Check estimated transform instead of matches. Est. transform should be more stable and comparable between SURF and ORB.
* remove regression checking for VectorFeatures tests. It has a lot of data and the test is the same as the previous one except that it tests a different vector size for performance, so sanity checking does not add any value here. Added basic sanity asserts instead.
* stitching tests: allow relative error for transform
* allows .01 relative error for estimated homography sanity check in stitching matchers tests
* fix VS warning
stitching tests: increase relative error
increase relative error to make it pass on all platforms (results are still good).
stitching test: allow bigger relative error
transformation can differ in small values (with small absolute difference, but large relative difference). transformation output still looks usable for all platforms. This difference affects only mac and windows, linux passes fine with small difference.
* stitching: add tests for affine matcher
uses s1, s2 images. added also new sanity data.
* stitching tests: use different data for matchers tests
this data should yield a more stable transformation (it has many more matches, especially for SURF). Sanity data regenerated.
* stitching test: rework tests for matchers
* separated rotation and translations as they are different by scale.
* use appropriate absolute error for them separately. (relative error does not work for values near zero.)
* stitching: fix affine warper compensation
calculation of rotation and translation extracted for plane warper was wrong
* stitching test: enable surf for opencl integration tests
* enable SURF with correct guard (HAVE_OPENCV_XFEATURES2D)
* add OPENCL guard and correct namespace as usual for opencl tests
* stitching: add ocl accuracy test for affine warper
test consistent results with ocl on and off
* stitching: add affine warper ocl perf test
add affine warper to existing warper perf tests. Added new sanity data.
* stitching: do not overwrite inliers in affine matcher
* estimation is run second time on inliers only, inliers produces in second run will not be therefore correct for all matches
* calib3d: add Levenberg–Marquardt refining to estimateAffine2D* functions
this adds affine Levenberg–Marquardt refining to estimateAffine2D functions similar to what is done in findHomography.
implements Levenberg–Marquardt refinig for both full affine and partial affine transformations.
* stitching: remove reestimation step in affine matcher
reestimation step is not needed. estimateAffine2D* functions are running their own reestimation on inliers using the Levenberg-Marquardt algorithm, which is better than simply rerunning RANSAC on inliers.
* implement partial affine bundle adjuster
bundle adjuster that expect affine transform with 4DOF. Refines parameters for all cameras together.
stitching: fix bug in BundleAdjusterAffinePartial
* use the inverse properly
* use a static buffer for the inverse to speed it up
* samples: add affine bundle adjuster option to stitching_detailed
* add support for using affine bundle adjuster with 4DOF
* improve logging of initial intrinsics
* stitching: add affine bundle adjuster test
* fix build warnings
* stitching: increase limit on sanity check
prevents spurious test failures on mac. values are still pretty fine.
* stitching: set affine bundle adjuster for SCANS mode
* fix bug with AffineBestOf2NearestMatcher (we want to select affine partial mode)
* select right bundle adjuster
* stitching: increase error bound for matcher tests
* this prevents failure on mac. tranformation is still ok.
* stitching: implement affine bundle adjuster
* implements affine bundle adjuster that is using full affine transform
* existing test case modified to test both affinePartial an full affine bundle adjuster
* add stitching tutorial
* show basic usage of stitching api (Stitcher class)
* stitching: add more integration test for affine stitching
* added new datasets to existing testcase
* removed unused include
* calib3d: move `haveCollinearPoints` to common header
* added comment to make that this also checks too close points
* calib3d: redone checkSubset for estimateAffine* callback
* use common function to check collinearity
* this also ensures that point will not be too close to each other
* calib3d: change estimateAffine* functions API
* more similar to `findHomography`, `findFundamentalMat`, `findEssentialMat` and similar
* follows standard recommended semantic INPUTS, OUTPUTS, FLAGS
* allows to disable refining
* supported LMEDS robust method (tests yet to come) along with RANSAC
* extended docs with some tips
* calib3d: rewrite estimateAffine2D test
* rewrite in googletest style
* parametrize to test both robust methods (RANSAC and LMEDS)
* get rid of boilerplate
* calib3d: rework estimateAffinePartial2D test
* rework in googletest style
* add testing for LMEDS
* calib3d: rework estimateAffine*2D perf test
* test for LMEDS speed
* test with/without Levenberg-Marquart
* remove sanity checking (this is covered by accuracy tests)
* calib3d: improve estimateAffine*2D tests
* test transformations in loop
* improves test by testing more potential transformations
* calib3d: rewrite kernels for estimateAffine*2D functions
* use analytical solution instead of SVD
* this version is faster especially for smaller amount of points
* calib3d: tune up perf of estimateAffine*2D functions
* avoid copying inliers
* avoid converting input points if not necessary
* check only `from` point for collinearity, as `to` does not affect stability of transform
* tutorials: add commands examples to stitching tutorials
* add some examples how to run stitcher sample code
* mention stitching_detailed.cpp
* calib3d: change computeError for estimateAffine*2D
* do error computing in floats instead of doubles
this have required precision + we were storing the result in float anyway. This make code faster and allows auto-vectorization by smart compilers.
* documentation: mention estimateAffine*2D function
* refer to new functions on appropriate places
* prefer estimateAffine*2D over estimateRigidTransform
* stitching: add camera models documentations
* mention camera models in module documentation to give user a better overview and reduce confusion
2016-10-23 00:10:42 +08:00
|
|
|
void AffineBestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
|
|
|
|
MatchesInfo &matches_info)
|
|
|
|
{
|
|
|
|
(*impl_)(features1, features2, matches_info);
|
|
|
|
|
|
|
|
// Check if it makes sense to find transform
|
|
|
|
if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Construct point-point correspondences for transform estimation
|
|
|
|
Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
|
|
|
|
Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
|
|
|
|
for (size_t i = 0; i < matches_info.matches.size(); ++i)
|
|
|
|
{
|
|
|
|
const cv::DMatch &m = matches_info.matches[i];
|
|
|
|
src_points.at<Point2f>(0, static_cast<int>(i)) = features1.keypoints[m.queryIdx].pt;
|
|
|
|
dst_points.at<Point2f>(0, static_cast<int>(i)) = features2.keypoints[m.trainIdx].pt;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find pair-wise motion
|
|
|
|
if (full_affine_)
|
|
|
|
matches_info.H = estimateAffine2D(src_points, dst_points, matches_info.inliers_mask);
|
|
|
|
else
|
|
|
|
matches_info.H = estimateAffinePartial2D(src_points, dst_points, matches_info.inliers_mask);
|
|
|
|
|
|
|
|
if (matches_info.H.empty()) {
|
|
|
|
// could not find transformation
|
|
|
|
matches_info.confidence = 0;
|
|
|
|
matches_info.num_inliers = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find number of inliers
|
|
|
|
matches_info.num_inliers = 0;
|
|
|
|
for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
|
|
|
|
if (matches_info.inliers_mask[i])
|
|
|
|
matches_info.num_inliers++;
|
|
|
|
|
|
|
|
// These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic
|
|
|
|
// Image Stitching using Invariant Features"
|
|
|
|
matches_info.confidence =
|
|
|
|
matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());
|
|
|
|
|
|
|
|
/* should we remove matches between too close images? */
|
|
|
|
// matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;
|
|
|
|
|
2018-02-12 23:15:36 +08:00
|
|
|
// extend H to represent linear transformation in homogeneous coordinates
|
Merge pull request #6933 from hrnr:gsoc_all
[GSOC] New camera model for stitching pipeline
* implement estimateAffine2D
estimates affine transformation using robust RANSAC method.
* uses RANSAC framework in calib3d
* includes accuracy test
* uses SVD decomposition for solving 3 point equation
* implement estimateAffinePartial2D
estimates limited affine transformation
* includes accuracy test
* stitching: add affine matcher
initial version of matcher that estimates affine transformation
* stitching: added affine transform estimator
initial version of estimator that simply chain transformations in homogeneous coordinates
* calib3d: rename estimateAffine3D test
test Calib3d_EstimateAffineTransform rename to Calib3d_EstimateAffine3D. This is more descriptive and prevents confusion with estimateAffine2D tests.
* added perf test for estimateAffine functions
tests both estimateAffine2D and estimateAffinePartial2D
* calib3d: compare error in square in estimateAffine2D
* incorporates fix from #6768
* rerun affine estimation on inliers
* stitching: new API for parallel feature finding
due to ABI breakage new functionality is added to `FeaturesFinder2`, `SurfFeaturesFinder2` and `OrbFeaturesFinder2`
* stitching: add tests for parallel feature find API
* perf test (about linear speed up)
* accuracy test compares results with serial version
* stitching: use dynamic_cast to overcome ABI issues
adding parallel API to FeaturesFinder breaks ABI. This commit uses dynamic_cast and hardcodes thread-safe finders to avoid breaking ABI.
This should be replaced by proper method similar to FeaturesMatcher on next ABI break.
* use estimateAffinePartial2D in AffineBestOf2NearestMatcher
* add constructor to AffineBestOf2NearestMatcher
* allows to choose between full affine transform and partial affine transform. Other params are the as for BestOf2NearestMatcher
* added protected field
* samples: stitching_detailed support affine estimator and matcher
* added new flags to choose matcher and estimator
* stitching: rework affine matcher
represent transformation in homogeneous coordinates
affine matcher: remove duplicite code
rework flow to get rid of duplicite code
affine matcher: do not center points to (0, 0)
it is not needed for affine model. it should not affect estimation in any way.
affine matcher: remove unneeded cv namespacing
* stitching: add stub bundle adjuster
* adds stub bundle adjuster that does nothing
* can be used in place of standard bundle adjusters to omit bundle adjusting step
* samples: stitching detailed, support no budle adjust
* uses new NoBundleAdjuster
* added affine warper
* uses R to get whole affine transformation and propagates rotation and translation to plane warper
* add affine warper factory class
* affine warper: compensate transformation
* samples: stitching_detailed add support for affine warper
* add Stitcher::create method
this method follows similar constructor methods and returns smart pointer. This allows constructing Stitcher according to OpenCV guidelines.
* supports multiple stitcher configurations (PANORAMA and SCANS) for convenient setup
* returns cv::Ptr
* stitcher: dynamicaly determine correct estimator
we need to use affine estimator for affine matcher
* preserves ABI (but add hints for ABI 4)
* uses dynamic_cast hack to inject correct estimator
* sample stitching: add support for multiple modes
shows how to use different configurations of stitcher easily (panorama stitching and scans affine model)
* stitcher: find features in parallel
use new FeatureFinder API to find features in parallel. Parallelized using TBB.
* stitching: disable parallel feature finding for OCL
it does not bring much speedup to run features finder in parallel when OpenCL is enabled, because finder needs to wait for OCL device.
Also, currently ORB is not thread-safe when OCL is enabled.
* stitching: move matcher tests
move matchers tests perf_stich.cpp -> perf_matchers.cpp
* stitching: add affine stiching integration test
test basic affine stitching (SCANS mode of stitcher) with images that have only translation between them
* enable surf for stitching tests
stitching.b12 test was failing with surf
investigated the issue, surf is producing good result. Transformation is only slightly different from ORB, so that resulting pano does not exactly match ORB's result. That caused sanity check to fail.
* added size checks similar to other tests
* sanity check will be applied only for ORB
* stitching: fix wrong estimator choice
if case was exactly wrong, estimators were chosen wrong
added logging for estimated transformation
* enable surf for matchers stitching tests
* enable SURF
* rework sanity checking. Check estimated transform instead of matches. Est. transform should be more stable and comparable between SURF and ORB.
* remove regression checking for VectorFeatures tests. It has a lot if data andtest is the same as previous except it test different vector size for performance, so sanity checking does not add any value here. Added basic sanity asserts instead.
* stitching tests: allow relative error for transform
* allows .01 relative error for estimated homography sanity check in stitching matchers tests
* fix VS warning
stitching tests: increase relative error
increase relative error to make it pass on all platforms (results are still good).
stitching test: allow bigger relative error
transformation can differ in small values (with small absolute difference, but large relative difference). transformation output still looks usable for all platforms. This difference affects only mac and windows, linux passes fine with small difference.
* stitching: add tests for affine matcher
uses s1, s2 images. added also new sanity data.
* stitching tests: use different data for matchers tests
this data should yeild more stable transformation (it has much more matches, especially for surf). Sanity data regenerated.
* stitching test: rework tests for matchers
* separated rotation and translations as they are different by scale.
* use appropriate absolute error for them separately. (relative error does not work for values near zero.)
* stitching: fix affine warper compensation
calculation of rotation and translation extracted for plane warper was wrong
* stitching test: enable surf for opencl integration tests
* enable SURF with correct guard (HAVE_OPENCV_XFEATURES2D)
* add OPENCL guard and correct namespace as usual for opencl tests
* stitching: add ocl accuracy test for affine warper
test consistent results with ocl on and off
* stitching: add affine warper ocl perf test
add affine warper to existing warper perf tests. Added new sanity data.
* stitching: do not overwrite inliers in affine matcher
* estimation is run second time on inliers only, inliers produces in second run will not be therefore correct for all matches
* calib3d: add Levenberg–Marquardt refining to estimateAffine2D* functions
this adds affine Levenberg–Marquardt refining to estimateAffine2D functions similar to what is done in findHomography.
implements Levenberg–Marquardt refinig for both full affine and partial affine transformations.
* stitching: remove reestimation step in affine matcher
reestimation step is not needed. estimateAffine2D* functions are running their own reestimation on inliers using the Levenberg-Marquardt algorithm, which is better than simply rerunning RANSAC on inliers.
* implement partial affine bundle adjuster
bundle adjuster that expect affine transform with 4DOF. Refines parameters for all cameras together.
stitching: fix bug in BundleAdjusterAffinePartial
* use the invers properly
* use static buffer for invers to speed it up
* samples: add affine bundle adjuster option to stitching_detailed
* add support for using affine bundle adjuster with 4DOF
* improve logging of initial intristics
* sttiching: add affine bundle adjuster test
* fix build warnings
* stitching: increase limit on sanity check
prevents spurious test failures on mac. values are still pretty fine.
* stitching: set affine bundle adjuster for SCANS mode
* fix bug with AffineBestOf2NearestMatcher (we want to select affine partial mode)
* select right bundle adjuster
* stitching: increase error bound for matcher tests
* this prevents failure on mac. tranformation is still ok.
* stitching: implement affine bundle adjuster
* implements affine bundle adjuster that is using full affine transform
* existing test case modified to test both affinePartial an full affine bundle adjuster
* add stitching tutorial
* show basic usage of stitching api (Stitcher class)
* stitching: add more integration test for affine stitching
* added new datasets to existing testcase
* removed unused include
* calib3d: move `haveCollinearPoints` to common header
* added comment to make that this also checks too close points
* calib3d: redone checkSubset for estimateAffine* callback
* use common function to check collinearity
* this also ensures that point will not be too close to each other
* calib3d: change estimateAffine* functions API
* more similar to `findHomography`, `findFundamentalMat`, `findEssentialMat` and similar
* follows standard recommended semantic INPUTS, OUTPUTS, FLAGS
* allows to disable refining
* supported LMEDS robust method (tests yet to come) along with RANSAC
* extended docs with some tips
* calib3d: rewrite estimateAffine2D test
* rewrite in googletest style
* parametrize to test both robust methods (RANSAC and LMEDS)
* get rid of boilerplate
* calib3d: rework estimateAffinePartial2D test
* rework in googletest style
* add testing for LMEDS
* calib3d: rework estimateAffine*2D perf test
* test for LMEDS speed
* test with/without Levenberg-Marquart
* remove sanity checking (this is covered by accuracy tests)
* calib3d: improve estimateAffine*2D tests
* test transformations in loop
* improves test by testing more potential transformations
* calib3d: rewrite kernels for estimateAffine*2D functions
* use analytical solution instead of SVD
* this version is faster especially for smaller amount of points
* calib3d: tune up perf of estimateAffine*2D functions
* avoid copying inliers
* avoid converting input points if not necessary
* check only `from` point for collinearity, as `to` does not affect stability of transform
* tutorials: add commands examples to stitching tutorials
* add some examples how to run stitcher sample code
* mention stitching_detailed.cpp
* calib3d: change computeError for estimateAffine*2D
* do error computing in floats instead of doubles
this have required precision + we were storing the result in float anyway. This make code faster and allows auto-vectorization by smart compilers.
* documentation: mention estimateAffine*2D function
* refer to new functions on appropriate places
* prefer estimateAffine*2D over estimateRigidTransform
* stitching: add camera models documentations
* mention camera models in module documentation to give user a better overview and reduce confusion
2016-10-23 00:10:42 +08:00
|
|
|
matches_info.H.push_back(Mat::zeros(1, 3, CV_64F));
|
|
|
|
matches_info.H.at<double>(2, 2) = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-17 15:12:04 +08:00
|
|
|
} // namespace detail
|
|
|
|
} // namespace cv
|