Merge pull request #2565 from apavlenko:tapi_stitching

This commit is contained in:
Andrey Pavlenko 2014-04-15 19:56:11 +04:00 committed by OpenCV Buildbot
commit dab8e920b0
35 changed files with 1162 additions and 625 deletions

View File

@ -218,6 +218,9 @@ public:
virtual void release() const;
virtual void clear() const;
virtual void setTo(const _InputArray& value, const _InputArray & mask = _InputArray()) const;
void assign(const UMat& u) const;
void assign(const Mat& m) const;
};

View File

@ -598,6 +598,8 @@ CV_EXPORTS int predictOptimalVectorWidth(InputArray src1, InputArray src2 = noAr
InputArray src4 = noArray(), InputArray src5 = noArray(), InputArray src6 = noArray(),
InputArray src7 = noArray(), InputArray src8 = noArray(), InputArray src9 = noArray());
CV_EXPORTS void buildOptionsAddMatrixDescription(String& buildOptions, const String& name, InputArray _m);
class CV_EXPORTS Image2D
{
public:

View File

@ -1369,6 +1369,21 @@ void _InputArray::getUMatVector(std::vector<UMat>& umv) const
return;
}
if( k == UMAT )
{
UMat& v = *(UMat*)obj;
umv.resize(1);
umv[0] = v;
return;
}
if( k == MAT )
{
Mat& v = *(Mat*)obj;
umv.resize(1);
umv[0] = v.getUMat(accessFlags);
return;
}
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
}
@ -2592,6 +2607,43 @@ void _OutputArray::setTo(const _InputArray& arr, const _InputArray & mask) const
CV_Error(Error::StsNotImplemented, "");
}
// Stores the given UMat into the wrapped output object.
// For a UMAT target this is a plain operator= assignment; for a MAT target the
// device data is copied into the host-side Mat. Any other kind is unsupported.
void _OutputArray::assign(const UMat& u) const
{
    switch (kind())
    {
    case UMAT:
        *(UMat*)obj = u;
        break;
    case MAT:
        u.copyTo(*(Mat*)obj); // TODO check u.getMat()
        break;
    default:
        CV_Error(Error::StsNotImplemented, "");
    }
}
// Stores the given Mat into the wrapped output object.
// For a MAT target this is a plain operator= assignment; for a UMAT target the
// host data is copied into the device-side UMat. Any other kind is unsupported.
void _OutputArray::assign(const Mat& m) const
{
    switch (kind())
    {
    case UMAT:
        m.copyTo(*(UMat*)obj); // TODO check m.getUMat()
        break;
    case MAT:
        *(Mat*)obj = m;
        break;
    default:
        CV_Error(Error::StsNotImplemented, "");
    }
}
static _InputOutputArray _none;
InputOutputArray noArray() { return _none; }

View File

@ -3798,11 +3798,16 @@ public:
cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
if( u->refcount == 0 )
// FIXIT Workaround for UMat synchronization issue
// if( u->refcount == 0 )
{
if( !u->copyOnMap() )
{
CV_Assert(u->data == 0);
if (u->data) // FIXIT Workaround for UMat synchronization issue
{
//CV_Assert(u->hostCopyObsolete() == false);
return;
}
// because there can be other map requests for the same UMat with different access flags,
// we use the universal (read-write) access mode.
cl_int retval = 0;
@ -3844,6 +3849,10 @@ public:
UMatDataAutoLock autolock(u);
// FIXIT Workaround for UMat synchronization issue
if(u->refcount > 0)
return;
cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
cl_int retval = 0;
if( !u->copyOnMap() && u->data )
@ -4404,7 +4413,24 @@ int predictOptimalVectorWidth(InputArray src1, InputArray src2, InputArray src3,
#undef PROCESS_SRC
/////////////////////////////////////////// Image2D ////////////////////////////////////////////////////
// TODO Make this as a method of OpenCL "BuildOptions" class
// Appends OpenCL build-option defines describing matrix _m under prefix `name`:
// <name>_T (full type), <name>_T1 (single-channel type), <name>_CN (channels),
// <name>_TSIZE / <name>_T1SIZE (element sizes in bytes), <name>_DEPTH (depth id).
// A separating space is inserted when buildOptions already has content.
void buildOptionsAddMatrixDescription(String& buildOptions, const String& name, InputArray _m)
{
    if (!buildOptions.empty())
        buildOptions += " ";
    const int type = _m.type();
    const int depth = CV_MAT_DEPTH(type);
    const char* n = name.c_str();
    buildOptions += format(
        "-D %s_T=%s -D %s_T1=%s -D %s_CN=%d -D %s_TSIZE=%d -D %s_T1SIZE=%d -D %s_DEPTH=%d",
        n, ocl::typeToStr(type),
        n, ocl::typeToStr(CV_MAKE_TYPE(depth, 1)),
        n, (int)CV_MAT_CN(type),
        n, (int)CV_ELEM_SIZE(type),
        n, (int)CV_ELEM_SIZE1(type),
        n, (int)depth
    );
}
struct Image2D::Impl
{

View File

@ -330,7 +330,7 @@ static bool ocl_match2Dispatcher(InputArray query, InputArray train, const UMat
// Dispatches a k-nearest-match request to the 2-NN OpenCL matcher.
// Thin forwarding wrapper; returns whatever ocl_match2Dispatcher reports.
// Fix: removed a duplicated, unreachable second return statement.
static bool ocl_kmatchDispatcher(InputArray query, InputArray train, const UMat &trainIdx,
                                 const UMat &distance, int distType)
{
    return ocl_match2Dispatcher(query, train, trainIdx, distance, distType);
}
static bool ocl_knnMatchSingle(InputArray query, InputArray train, UMat &trainIdx,
@ -1209,8 +1209,8 @@ FlannBasedMatcher::FlannBasedMatcher( const Ptr<flann::IndexParams>& _indexParam
void FlannBasedMatcher::add( InputArrayOfArrays _descriptors )
{
DescriptorMatcher::add( _descriptors );
std::vector<Mat> descriptors;
_descriptors.getMatVector(descriptors);
std::vector<UMat> descriptors;
_descriptors.getUMatVector(descriptors);
for( size_t i = 0; i < descriptors.size(); i++ )
{
@ -1232,6 +1232,13 @@ void FlannBasedMatcher::train()
{
if( !flannIndex || mergedDescriptors.size() < addedDescCount )
{
// FIXIT: Workaround for 'utrainDescCollection' issue (PR #2142)
if (!utrainDescCollection.empty())
{
CV_Assert(trainDescCollection.size() == 0);
for (size_t i = 0; i < utrainDescCollection.size(); ++i)
trainDescCollection.push_back(utrainDescCollection[i].getMat(ACCESS_READ));
}
mergedDescriptors.set( trainDescCollection );
flannIndex = makePtr<flann::Index>( mergedDescriptors.getDescriptors(), *indexParams );
}

View File

@ -43,7 +43,7 @@ detail::Blender::feed
Processes the image.
.. ocv:function:: void detail::Blender::feed(const Mat &img, const Mat &mask, Point tl)
.. ocv:function:: void detail::Blender::feed(InputArray img, InputArray mask, Point tl)
:param img: Source image
@ -56,7 +56,7 @@ detail::Blender::blend
Blends and returns the final pano.
.. ocv:function:: void detail::Blender::blend(Mat &dst, Mat &dst_mask)
.. ocv:function:: void detail::Blender::blend(InputOutputArray dst, InputOutputArray dst_mask)
:param dst: Final pano

View File

@ -27,9 +27,9 @@ Base class for all exposure compensators. ::
detail::ExposureCompensator::feed
----------------------------------
.. ocv:function:: void detail::ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<Mat> &images, const std::vector<Mat> &masks)
.. ocv:function:: void detail::ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images, const std::vector<UMat> &masks)
.. ocv:function:: void detail::ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<Mat> &images, const std::vector<std::pair<Mat,uchar> > &masks)
.. ocv:function:: void detail::ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images, const std::vector<std::pair<UMat,uchar> > &masks)
:param corners: Source image top-left corners
@ -42,7 +42,7 @@ detail::ExposureCompensator::apply
Compensate exposure in the specified image.
.. ocv:function:: void detail::ExposureCompensator::apply(int index, Point corner, Mat &image, const Mat &mask)
.. ocv:function:: void detail::ExposureCompensator::apply(int index, Point corner, InputOutputArray image, InputArray mask)
:param index: Image index

View File

@ -110,9 +110,9 @@ These functions try to match the given images and to estimate rotations of each
.. note:: Use the functions only if you're aware of the stitching pipeline, otherwise use :ocv:func:`Stitcher::stitch`.
.. ocv:function:: Status Stitcher::estimateTransform(InputArray images)
.. ocv:function:: Status Stitcher::estimateTransform(InputArrayOfArrays images)
.. ocv:function:: Status Stitcher::estimateTransform(InputArray images, const std::vector<std::vector<Rect> > &rois)
.. ocv:function:: Status Stitcher::estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois)
:param images: Input images.
@ -129,7 +129,7 @@ These functions try to compose the given images (or images stored internally fro
.. ocv:function:: Status Stitcher::composePanorama(OutputArray pano)
.. ocv:function:: Status Stitcher::composePanorama(InputArray images, OutputArray pano)
.. ocv:function:: Status Stitcher::composePanorama(InputArrayOfArrays images, OutputArray pano)
:param images: Input images.
@ -142,9 +142,9 @@ Stitcher::stitch
These functions try to stitch the given images.
.. ocv:function:: Status Stitcher::stitch(InputArray images, OutputArray pano)
.. ocv:function:: Status Stitcher::stitch(InputArrayOfArrays images, OutputArray pano)
.. ocv:function:: Status Stitcher::stitch(InputArray images, const std::vector<std::vector<Rect> > &rois, OutputArray pano)
.. ocv:function:: Status Stitcher::stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois, OutputArray pano)
:param images: Input images.

View File

@ -40,9 +40,9 @@ detail::FeaturesFinder::operator()
Finds features in the given image.
.. ocv:function:: void detail::FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features)
.. ocv:function:: void detail::FeaturesFinder::operator ()(InputArray image, ImageFeatures &features)
.. ocv:function:: void detail::FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features, const std::vector<cv::Rect> &rois)
.. ocv:function:: void detail::FeaturesFinder::operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois)
:param image: Source image
@ -64,7 +64,7 @@ detail::FeaturesFinder::find
This method must implement features finding logic in order to make the wrappers `detail::FeaturesFinder::operator()`_ work.
.. ocv:function:: void detail::FeaturesFinder::find(const Mat &image, ImageFeatures &features)
.. ocv:function:: void detail::FeaturesFinder::find(InputArray image, ImageFeatures &features)
:param image: Source image
@ -171,7 +171,7 @@ Performs images matching.
:param matches_info: Found matches
.. ocv:function:: void detail::FeaturesMatcher::operator ()( const std::vector<ImageFeatures> & features, std::vector<MatchesInfo> & pairwise_matches, const Mat & mask=Mat() )
.. ocv:function:: void detail::FeaturesMatcher::operator ()( const std::vector<ImageFeatures> & features, std::vector<MatchesInfo> & pairwise_matches, const UMat & mask=UMat() )
:param features: Features of the source images

View File

@ -22,7 +22,7 @@ detail::SeamFinder::find
Estimates seams.
.. ocv:function:: void detail::SeamFinder::find(const std::vector<Mat> &src, const std::vector<Point> &corners, std::vector<Mat> &masks)
.. ocv:function:: void detail::SeamFinder::find(const std::vector<UMat> &src, const std::vector<Point> &corners, std::vector<UMat> &masks)
:param src: Source images

View File

@ -98,8 +98,8 @@ public:
void setFeaturesMatcher(Ptr<detail::FeaturesMatcher> features_matcher)
{ features_matcher_ = features_matcher; }
const cv::Mat& matchingMask() const { return matching_mask_; }
void setMatchingMask(const cv::Mat &mask)
const cv::UMat& matchingMask() const { return matching_mask_; }
void setMatchingMask(const cv::UMat &mask)
{
CV_Assert(mask.type() == CV_8U && mask.cols == mask.rows);
matching_mask_ = mask.clone();
@ -127,14 +127,14 @@ public:
const Ptr<detail::Blender> blender() const { return blender_; }
void setBlender(Ptr<detail::Blender> b) { blender_ = b; }
Status estimateTransform(InputArray images);
Status estimateTransform(InputArray images, const std::vector<std::vector<Rect> > &rois);
Status estimateTransform(InputArrayOfArrays images);
Status estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois);
Status composePanorama(OutputArray pano);
Status composePanorama(InputArray images, OutputArray pano);
Status composePanorama(InputArrayOfArrays images, OutputArray pano);
Status stitch(InputArray images, OutputArray pano);
Status stitch(InputArray images, const std::vector<std::vector<Rect> > &rois, OutputArray pano);
Status stitch(InputArrayOfArrays images, OutputArray pano);
Status stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois, OutputArray pano);
std::vector<int> component() const { return indices_; }
std::vector<detail::CameraParams> cameras() const { return cameras_; }
@ -152,7 +152,7 @@ private:
double conf_thresh_;
Ptr<detail::FeaturesFinder> features_finder_;
Ptr<detail::FeaturesMatcher> features_matcher_;
cv::Mat matching_mask_;
cv::UMat matching_mask_;
Ptr<detail::BundleAdjusterBase> bundle_adjuster_;
bool do_wave_correct_;
detail::WaveCorrectKind wave_correct_kind_;
@ -161,12 +161,12 @@ private:
Ptr<detail::SeamFinder> seam_finder_;
Ptr<detail::Blender> blender_;
std::vector<cv::Mat> imgs_;
std::vector<cv::UMat> imgs_;
std::vector<std::vector<cv::Rect> > rois_;
std::vector<cv::Size> full_img_sizes_;
std::vector<detail::ImageFeatures> features_;
std::vector<detail::MatchesInfo> pairwise_matches_;
std::vector<cv::Mat> seam_est_imgs_;
std::vector<cv::UMat> seam_est_imgs_;
std::vector<int> indices_;
std::vector<detail::CameraParams> cameras_;
double work_scale_;

View File

@ -60,11 +60,11 @@ public:
void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);
virtual void prepare(Rect dst_roi);
virtual void feed(const Mat &img, const Mat &mask, Point tl);
virtual void blend(Mat &dst, Mat &dst_mask);
virtual void feed(InputArray img, InputArray mask, Point tl);
virtual void blend(InputOutputArray dst, InputOutputArray dst_mask);
protected:
Mat dst_, dst_mask_;
UMat dst_, dst_mask_;
Rect dst_roi_;
};
@ -78,18 +78,18 @@ public:
void setSharpness(float val) { sharpness_ = val; }
void prepare(Rect dst_roi);
void feed(const Mat &img, const Mat &mask, Point tl);
void blend(Mat &dst, Mat &dst_mask);
void feed(InputArray img, InputArray mask, Point tl);
void blend(InputOutputArray dst, InputOutputArray dst_mask);
// Creates weight maps for fixed set of source images by their masks and top-left corners.
// Final image can be obtained by simple weighting of the source images.
Rect createWeightMaps(const std::vector<Mat> &masks, const std::vector<Point> &corners,
std::vector<Mat> &weight_maps);
Rect createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
std::vector<UMat> &weight_maps);
private:
float sharpness_;
Mat weight_map_;
Mat dst_weight_map_;
UMat weight_map_;
UMat dst_weight_map_;
};
inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); }
@ -104,13 +104,13 @@ public:
void setNumBands(int val) { actual_num_bands_ = val; }
void prepare(Rect dst_roi);
void feed(const Mat &img, const Mat &mask, Point tl);
void blend(Mat &dst, Mat &dst_mask);
void feed(InputArray img, InputArray mask, Point tl);
void blend(InputOutputArray dst, InputOutputArray dst_mask);
private:
int actual_num_bands_, num_bands_;
std::vector<Mat> dst_pyr_laplace_;
std::vector<Mat> dst_band_weights_;
std::vector<UMat> dst_pyr_laplace_;
std::vector<UMat> dst_band_weights_;
Rect dst_roi_final_;
bool can_use_gpu_;
int weight_type_; //CV_32F or CV_16S
@ -120,16 +120,16 @@ private:
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
void CV_EXPORTS normalizeUsingWeightMap(const Mat& weight, Mat& src);
void CV_EXPORTS normalizeUsingWeightMap(InputArray weight, InputOutputArray src);
void CV_EXPORTS createWeightMap(const Mat& mask, float sharpness, Mat& weight);
void CV_EXPORTS createWeightMap(InputArray mask, float sharpness, InputOutputArray weight);
void CV_EXPORTS createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat>& pyr);
void CV_EXPORTS createLaplacePyrGpu(const Mat &img, int num_levels, std::vector<Mat>& pyr);
void CV_EXPORTS createLaplacePyr(InputArray img, int num_levels, std::vector<UMat>& pyr);
void CV_EXPORTS createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat>& pyr);
// Restores source image
void CV_EXPORTS restoreImageFromLaplacePyr(std::vector<Mat>& pyr);
void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<Mat>& pyr);
void CV_EXPORTS restoreImageFromLaplacePyr(std::vector<UMat>& pyr);
void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<UMat>& pyr);
} // namespace detail
} // namespace cv

View File

@ -56,29 +56,29 @@ public:
enum { NO, GAIN, GAIN_BLOCKS };
static Ptr<ExposureCompensator> createDefault(int type);
void feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
const std::vector<Mat> &masks);
virtual void feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
const std::vector<std::pair<Mat,uchar> > &masks) = 0;
virtual void apply(int index, Point corner, Mat &image, const Mat &mask) = 0;
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<UMat> &masks);
virtual void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) = 0;
virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0;
};
class CV_EXPORTS NoExposureCompensator : public ExposureCompensator
{
public:
void feed(const std::vector<Point> &/*corners*/, const std::vector<Mat> &/*images*/,
const std::vector<std::pair<Mat,uchar> > &/*masks*/) { }
void apply(int /*index*/, Point /*corner*/, Mat &/*image*/, const Mat &/*mask*/) { }
void feed(const std::vector<Point> &/*corners*/, const std::vector<UMat> &/*images*/,
const std::vector<std::pair<UMat,uchar> > &/*masks*/) { }
void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) { }
};
class CV_EXPORTS GainCompensator : public ExposureCompensator
{
public:
void feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
const std::vector<std::pair<Mat,uchar> > &masks);
void apply(int index, Point corner, Mat &image, const Mat &mask);
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks);
void apply(int index, Point corner, InputOutputArray image, InputArray mask);
std::vector<double> gains() const;
private:
@ -91,13 +91,13 @@ class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator
public:
BlocksGainCompensator(int bl_width = 32, int bl_height = 32)
: bl_width_(bl_width), bl_height_(bl_height) {}
void feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
const std::vector<std::pair<Mat,uchar> > &masks);
void apply(int index, Point corner, Mat &image, const Mat &mask);
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks);
void apply(int index, Point corner, InputOutputArray image, InputArray mask);
private:
int bl_width_, bl_height_;
std::vector<Mat_<float> > gain_maps_;
std::vector<UMat> gain_maps_;
};
} // namespace detail

View File

@ -60,7 +60,7 @@ struct CV_EXPORTS ImageFeatures
int img_idx;
Size img_size;
std::vector<KeyPoint> keypoints;
Mat descriptors;
UMat descriptors;
};
@ -68,12 +68,12 @@ class CV_EXPORTS FeaturesFinder
{
public:
virtual ~FeaturesFinder() {}
void operator ()(const Mat &image, ImageFeatures &features);
void operator ()(const Mat &image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
void operator ()(InputArray image, ImageFeatures &features);
void operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
virtual void collectGarbage() {}
protected:
virtual void find(const Mat &image, ImageFeatures &features) = 0;
virtual void find(InputArray image, ImageFeatures &features) = 0;
};
@ -84,7 +84,7 @@ public:
int num_octaves_descr = /*4*/3, int num_layers_descr = /*2*/4);
private:
void find(const Mat &image, ImageFeatures &features);
void find(InputArray image, ImageFeatures &features);
Ptr<FeatureDetector> detector_;
Ptr<DescriptorExtractor> extractor_;
@ -97,7 +97,7 @@ public:
OrbFeaturesFinder(Size _grid_size = Size(3,1), int nfeatures=1500, float scaleFactor=1.3f, int nlevels=5);
private:
void find(const Mat &image, ImageFeatures &features);
void find(InputArray image, ImageFeatures &features);
Ptr<ORB> orb;
Size grid_size;
@ -114,7 +114,7 @@ public:
void collectGarbage();
private:
void find(const Mat &image, ImageFeatures &features);
void find(InputArray image, ImageFeatures &features);
cuda::GpuMat image_;
cuda::GpuMat gray_image_;
@ -151,7 +151,7 @@ public:
MatchesInfo& matches_info) { match(features1, features2, matches_info); }
void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
const cv::Mat &mask = cv::Mat());
const cv::UMat &mask = cv::UMat());
bool isThreadSafe() const { return is_thread_safe_; }

View File

@ -54,32 +54,32 @@ class CV_EXPORTS SeamFinder
{
public:
virtual ~SeamFinder() {}
virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks) = 0;
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) = 0;
};
class CV_EXPORTS NoSeamFinder : public SeamFinder
{
public:
void find(const std::vector<Mat>&, const std::vector<Point>&, std::vector<Mat>&) {}
void find(const std::vector<UMat>&, const std::vector<Point>&, std::vector<UMat>&) {}
};
class CV_EXPORTS PairwiseSeamFinder : public SeamFinder
{
public:
virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks);
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks);
protected:
void run();
virtual void findInPair(size_t first, size_t second, Rect roi) = 0;
std::vector<Mat> images_;
std::vector<UMat> images_;
std::vector<Size> sizes_;
std::vector<Point> corners_;
std::vector<Mat> masks_;
std::vector<UMat> masks_;
};
@ -87,7 +87,7 @@ class CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder
{
public:
virtual void find(const std::vector<Size> &size, const std::vector<Point> &corners,
std::vector<Mat> &masks);
std::vector<UMat> &masks);
private:
void findInPair(size_t first, size_t second, Rect roi);
};
@ -103,8 +103,8 @@ public:
CostFunction costFunction() const { return costFunc_; }
void setCostFunction(CostFunction val) { costFunc_ = val; }
virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks);
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks);
private:
enum ComponentState
@ -154,7 +154,7 @@ private:
};
void process(
const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2);
const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2);
void findComponents();
@ -217,8 +217,8 @@ public:
~GraphCutSeamFinder();
void find(const std::vector<Mat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks);
void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks);
private:
// To avoid GCGraph dependency
@ -236,8 +236,8 @@ public:
: cost_type_(cost_type), terminal_cost_(terminal_cost),
bad_region_penalty_(bad_region_penalty) {}
void find(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners,
std::vector<cv::Mat> &masks);
void find(const std::vector<cv::UMat> &src, const std::vector<cv::Point> &corners,
std::vector<cv::UMat> &masks);
void findInPair(size_t first, size_t second, Rect roi);
private:

View File

@ -71,6 +71,7 @@
#define LOG_(_level, _msg) \
for(;;) \
{ \
using namespace std; \
if ((_level) >= ::cv::detail::stitchingLogLevel()) \
{ \
LOG_STITCHING_MSG(_msg); \
@ -145,7 +146,7 @@ private:
// Auxiliary functions
CV_EXPORTS bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi);
CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Mat> &images);
CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images);
CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS Point resultTl(const std::vector<Point> &corners);

View File

@ -160,6 +160,8 @@ class CV_EXPORTS SphericalWarper : public RotationWarperBase<SphericalProjector>
public:
SphericalWarper(float scale) { projector_.scale = scale; }
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst);
protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);
};
@ -178,6 +180,8 @@ class CV_EXPORTS CylindricalWarper : public RotationWarperBase<CylindricalProjec
public:
CylindricalWarper(float scale) { projector_.scale = scale; }
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst);
protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{
@ -503,45 +507,6 @@ protected:
}
};
/////////////////////////////////////// OpenCL Accelerated Warpers /////////////////////////////////////
// Plane warper with overrides whose definitions are not visible in this header
// (presumably OpenCL-accelerated, per the Ocl suffix — confirm in the .cpp).
class CV_EXPORTS PlaneWarperOcl : public PlaneWarper
{
public:
// scale: projected image scale factor, forwarded to the PlaneWarper base.
PlaneWarperOcl(float scale = 1.f) : PlaneWarper(scale) { }
// Convenience overload: delegates with a zero 3x1 translation vector T.
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
return buildMaps(src_size, K, R, Mat::zeros(3, 1, CV_32FC1), xmap, ymap);
}
// Convenience overload: delegates with a zero 3x1 translation vector T.
virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
return warp(src, K, R, Mat::zeros(3, 1, CV_32FC1), interp_mode, border_mode, dst);
}
// Overloads taking an explicit translation vector T; defined out of line.
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap);
virtual Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode, OutputArray dst);
};
// Spherical warper with buildMaps/warp overridden out of line (presumably
// OpenCL-accelerated, per the Ocl suffix — confirm in the .cpp).
class CV_EXPORTS SphericalWarperOcl : public SphericalWarper
{
public:
// scale: projected image scale factor, forwarded to the SphericalWarper base.
SphericalWarperOcl(float scale) : SphericalWarper(scale) { }
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst);
};
// Cylindrical warper with buildMaps/warp overridden out of line (presumably
// OpenCL-accelerated, per the Ocl suffix — confirm in the .cpp).
class CV_EXPORTS CylindricalWarperOcl : public CylindricalWarper
{
public:
// scale: projected image scale factor, forwarded to the CylindricalWarper base.
CylindricalWarperOcl(float scale) : CylindricalWarper(scale) { }
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst);
};
} // namespace detail
} // namespace cv

View File

@ -92,7 +92,7 @@ template <class P>
Point RotationWarperBase<P>::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst)
{
Mat xmap, ymap;
UMat xmap, ymap;
Rect dst_roi = buildMaps(src.size(), K, R, xmap, ymap);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());

View File

@ -167,24 +167,6 @@ public:
};
#endif
// WarperCreator factory producing the detail::PlaneWarperOcl rotation warper.
class PlaneWarperOcl: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PlaneWarperOcl>(scale); }
};
// WarperCreator factory producing the detail::SphericalWarperOcl rotation warper.
class SphericalWarperOcl: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::SphericalWarperOcl>(scale); }
};
// WarperCreator factory producing the detail::CylindricalWarperOcl rotation warper.
class CylindricalWarperOcl: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CylindricalWarperOcl>(scale); }
};
} // namespace cv
#endif // __OPENCV_STITCHING_WARPER_CREATORS_HPP__

View File

@ -0,0 +1,144 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2014, Itseez, Inc, all rights reserved.
#include "perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
using namespace cv;
using namespace perf;
using namespace cvtest::ocl;
using namespace std;
using namespace std::tr1;
#define SURF_MATCH_CONFIDENCE 0.65f
#define ORB_MATCH_CONFIDENCE 0.3f
#define WORK_MEGAPIX 0.6
typedef TestBaseWithParam<string> stitch;
#ifdef HAVE_OPENCV_NONFREE_TODO_FIND_WHY_SURF_IS_NOT_ABLE_TO_STITCH_PANOS
#define TEST_DETECTORS testing::Values("surf", "orb")
#else
#define TEST_DETECTORS testing::Values<string>("orb")
#endif
// Perf test: stitch the 3-image "a" panorama set, parameterized by detector name.
// Only the resulting pano dimensions are sanity-checked; timing is the payload.
OCL_PERF_TEST_P(stitch, a123, TEST_DETECTORS)
{
UMat pano;
vector<Mat> _imgs;
_imgs.push_back( imread( getDataPath("stitching/a1.png") ) );
_imgs.push_back( imread( getDataPath("stitching/a2.png") ) );
_imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
// Inputs are converted to UMat up front (presumably to exercise the OpenCL
// code paths — confirm against the T-API dispatch rules).
vector<UMat> imgs = ToUMat(_imgs);
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
: Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.iterations(20);
while(next())
{
// A fresh Stitcher each iteration — NOTE(review): presumably so cached
// internal state does not skew timing; construction is outside startTimer().
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher.stitch(imgs, pano);
stopTimer();
}
// Loose size bounds only; pixel-exact output is not expected to be reproducible.
EXPECT_NEAR(pano.size().width, 1182, 50);
EXPECT_NEAR(pano.size().height, 682, 30);
SANITY_CHECK_NOTHING();
}
// Perf test: stitch the 2-image "b" panorama set, parameterized by detector name.
// NOTE(review): unlike the a123/boat tests, inputs here stay as Mat (no ToUMat
// conversion) — confirm whether this asymmetry is intentional.
OCL_PERF_TEST_P(stitch, b12, TEST_DETECTORS)
{
UMat pano;
vector<Mat> imgs;
imgs.push_back( imread( getDataPath("stitching/b1.png") ) );
imgs.push_back( imread( getDataPath("stitching/b2.png") ) );
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
: Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.iterations(20);
while(next())
{
// Fresh Stitcher per iteration; construction stays outside the timed region.
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher.stitch(imgs, pano);
stopTimer();
}
// Loose size bounds only; pixel-exact output is not expected to be reproducible.
EXPECT_NEAR(pano.size().width, 1124, 50);
EXPECT_NEAR(pano.size().height, 644, 30);
SANITY_CHECK_NOTHING();
}
// Perf test: stitch the 6-image "boat" panorama set (largest fixture here),
// parameterized by detector name. Only pano dimensions are sanity-checked.
OCL_PERF_TEST_P(stitch, boat, TEST_DETECTORS)
{
UMat pano;
vector<Mat> _imgs;
_imgs.push_back( imread( getDataPath("stitching/boat1.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat2.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat3.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat4.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat5.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat6.jpg") ) );
// Inputs are converted to UMat up front (presumably to exercise the OpenCL
// code paths — confirm against the T-API dispatch rules).
vector<UMat> imgs = ToUMat(_imgs);
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
: Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.iterations(20);
while(next())
{
// Fresh Stitcher per iteration; construction stays outside the timed region.
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher.stitch(imgs, pano);
stopTimer();
}
// Loose size bounds only; pixel-exact output is not expected to be reproducible.
EXPECT_NEAR(pano.size().width, 10789, 200);
EXPECT_NEAR(pano.size().height, 2663, 100);
SANITY_CHECK_NOTHING();
}

View File

@ -63,24 +63,12 @@ public:
explicit WarperBase(int type, Size srcSize)
{
Ptr<WarperCreator> creator;
if (cv::ocl::useOpenCL())
{
if (type == SphericalWarperType)
creator = makePtr<SphericalWarperOcl>();
else if (type == CylindricalWarperType)
creator = makePtr<CylindricalWarperOcl>();
else if (type == PlaneWarperType)
creator = makePtr<PlaneWarperOcl>();
}
else
{
if (type == SphericalWarperType)
creator = makePtr<SphericalWarper>();
else if (type == CylindricalWarperType)
creator = makePtr<CylindricalWarper>();
else if (type == PlaneWarperType)
creator = makePtr<PlaneWarper>();
}
if (type == SphericalWarperType)
creator = makePtr<SphericalWarper>();
else if (type == CylindricalWarperType)
creator = makePtr<CylindricalWarper>();
else if (type == PlaneWarperType)
creator = makePtr<PlaneWarper>();
CV_Assert(!creator.empty());
K = Mat::eye(3, 3, CV_32FC1);

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencl_kernels.hpp"
namespace cv {
namespace detail {
@ -76,8 +77,13 @@ void Blender::prepare(Rect dst_roi)
}
void Blender::feed(const Mat &img, const Mat &mask, Point tl)
void Blender::feed(InputArray _img, InputArray _mask, Point tl)
{
Mat img = _img.getMat();
Mat mask = _mask.getMat();
Mat dst = dst_.getMat(ACCESS_RW);
Mat dst_mask = dst_mask_.getMat(ACCESS_RW);
CV_Assert(img.type() == CV_16SC3);
CV_Assert(mask.type() == CV_8U);
int dx = tl.x - dst_roi_.x;
@ -86,9 +92,9 @@ void Blender::feed(const Mat &img, const Mat &mask, Point tl)
for (int y = 0; y < img.rows; ++y)
{
const Point3_<short> *src_row = img.ptr<Point3_<short> >(y);
Point3_<short> *dst_row = dst_.ptr<Point3_<short> >(dy + y);
Point3_<short> *dst_row = dst.ptr<Point3_<short> >(dy + y);
const uchar *mask_row = mask.ptr<uchar>(y);
uchar *dst_mask_row = dst_mask_.ptr<uchar>(dy + y);
uchar *dst_mask_row = dst_mask.ptr<uchar>(dy + y);
for (int x = 0; x < img.cols; ++x)
{
@ -100,11 +106,13 @@ void Blender::feed(const Mat &img, const Mat &mask, Point tl)
}
void Blender::blend(Mat &dst, Mat &dst_mask)
void Blender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
dst_.setTo(Scalar::all(0), dst_mask_ == 0);
dst = dst_;
dst_mask = dst_mask_;
UMat mask;
compare(dst_mask_, 0, mask, CMP_EQ);
dst_.setTo(Scalar::all(0), mask);
dst.assign(dst_);
dst_mask.assign(dst_mask_);
dst_.release();
dst_mask_.release();
}
@ -118,21 +126,27 @@ void FeatherBlender::prepare(Rect dst_roi)
}
void FeatherBlender::feed(const Mat &img, const Mat &mask, Point tl)
void FeatherBlender::feed(InputArray _img, InputArray mask, Point tl)
{
Mat img = _img.getMat();
Mat dst = dst_.getMat(ACCESS_RW);
CV_Assert(img.type() == CV_16SC3);
CV_Assert(mask.type() == CV_8U);
createWeightMap(mask, sharpness_, weight_map_);
Mat weight_map = weight_map_.getMat(ACCESS_READ);
Mat dst_weight_map = dst_weight_map_.getMat(ACCESS_RW);
int dx = tl.x - dst_roi_.x;
int dy = tl.y - dst_roi_.y;
for (int y = 0; y < img.rows; ++y)
{
const Point3_<short>* src_row = img.ptr<Point3_<short> >(y);
Point3_<short>* dst_row = dst_.ptr<Point3_<short> >(dy + y);
const float* weight_row = weight_map_.ptr<float>(y);
float* dst_weight_row = dst_weight_map_.ptr<float>(dy + y);
Point3_<short>* dst_row = dst.ptr<Point3_<short> >(dy + y);
const float* weight_row = weight_map.ptr<float>(y);
float* dst_weight_row = dst_weight_map.ptr<float>(dy + y);
for (int x = 0; x < img.cols; ++x)
{
@ -145,16 +159,16 @@ void FeatherBlender::feed(const Mat &img, const Mat &mask, Point tl)
}
void FeatherBlender::blend(Mat &dst, Mat &dst_mask)
void FeatherBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
normalizeUsingWeightMap(dst_weight_map_, dst_);
dst_mask_ = dst_weight_map_ > WEIGHT_EPS;
compare(dst_weight_map_, WEIGHT_EPS, dst_mask_, CMP_GT);
Blender::blend(dst, dst_mask);
}
Rect FeatherBlender::createWeightMaps(const std::vector<Mat> &masks, const std::vector<Point> &corners,
std::vector<Mat> &weight_maps)
Rect FeatherBlender::createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
std::vector<UMat> &weight_maps)
{
weight_maps.resize(masks.size());
for (size_t i = 0; i < masks.size(); ++i)
@ -168,7 +182,7 @@ Rect FeatherBlender::createWeightMaps(const std::vector<Mat> &masks, const std::
{
Rect roi(corners[i].x - dst_roi.x, corners[i].y - dst_roi.y,
weight_maps[i].cols, weight_maps[i].rows);
weights_sum(roi) += weight_maps[i];
add(weights_sum(roi), weight_maps[i], weights_sum(roi));
}
for (size_t i = 0; i < weight_maps.size(); ++i)
@ -232,9 +246,39 @@ void MultiBandBlender::prepare(Rect dst_roi)
}
}
void MultiBandBlender::feed(const Mat &img, const Mat &mask, Point tl)
#ifdef HAVE_OPENCL
static bool ocl_MultiBandBlender_feed(InputArray _src, InputArray _weight,
InputOutputArray _dst, InputOutputArray _dst_weight)
{
String buildOptions = "-D DEFINE_feed";
ocl::buildOptionsAddMatrixDescription(buildOptions, "src", _src);
ocl::buildOptionsAddMatrixDescription(buildOptions, "weight", _weight);
ocl::buildOptionsAddMatrixDescription(buildOptions, "dst", _dst);
ocl::buildOptionsAddMatrixDescription(buildOptions, "dstWeight", _dst_weight);
ocl::Kernel k("feed", ocl::stitching::multibandblend_oclsrc, buildOptions);
if (k.empty())
return false;
UMat src = _src.getUMat();
k.args(ocl::KernelArg::ReadOnly(src),
ocl::KernelArg::ReadOnly(_weight.getUMat()),
ocl::KernelArg::ReadWrite(_dst.getUMat()),
ocl::KernelArg::ReadWrite(_dst_weight.getUMat())
);
size_t globalsize[2] = {src.cols, src.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
{
#if ENABLE_LOG
int64 t = getTickCount();
#endif
UMat img = _img.getUMat();
CV_Assert(img.type() == CV_16SC3 || img.type() == CV_8UC3);
CV_Assert(mask.type() == CV_8U);
@ -269,27 +313,39 @@ void MultiBandBlender::feed(const Mat &img, const Mat &mask, Point tl)
int right = br_new.x - tl.x - img.cols;
// Create the source image Laplacian pyramid
Mat img_with_border;
copyMakeBorder(img, img_with_border, top, bottom, left, right,
UMat img_with_border;
copyMakeBorder(_img, img_with_border, top, bottom, left, right,
BORDER_REFLECT);
std::vector<Mat> src_pyr_laplace;
LOGLN(" Add border to the source image, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
t = getTickCount();
#endif
std::vector<UMat> src_pyr_laplace;
if (can_use_gpu_ && img_with_border.depth() == CV_16S)
createLaplacePyrGpu(img_with_border, num_bands_, src_pyr_laplace);
else
createLaplacePyr(img_with_border, num_bands_, src_pyr_laplace);
LOGLN(" Create the source image Laplacian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
t = getTickCount();
#endif
// Create the weight map Gaussian pyramid
Mat weight_map;
std::vector<Mat> weight_pyr_gauss(num_bands_ + 1);
UMat weight_map;
std::vector<UMat> weight_pyr_gauss(num_bands_ + 1);
if(weight_type_ == CV_32F)
{
mask.convertTo(weight_map, CV_32F, 1./255.);
mask.getUMat().convertTo(weight_map, CV_32F, 1./255.);
}
else// weight_type_ == CV_16S
else // weight_type_ == CV_16S
{
mask.convertTo(weight_map, CV_16S);
add(weight_map, 1, weight_map, mask != 0);
mask.getUMat().convertTo(weight_map, CV_16S);
UMat add_mask;
compare(mask, 0, add_mask, CMP_NE);
add(weight_map, Scalar::all(1), weight_map, add_mask);
}
copyMakeBorder(weight_map, weight_pyr_gauss[0], top, bottom, left, right, BORDER_CONSTANT);
@ -297,66 +353,77 @@ void MultiBandBlender::feed(const Mat &img, const Mat &mask, Point tl)
for (int i = 0; i < num_bands_; ++i)
pyrDown(weight_pyr_gauss[i], weight_pyr_gauss[i + 1]);
LOGLN(" Create the weight map Gaussian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
t = getTickCount();
#endif
int y_tl = tl_new.y - dst_roi_.y;
int y_br = br_new.y - dst_roi_.y;
int x_tl = tl_new.x - dst_roi_.x;
int x_br = br_new.x - dst_roi_.x;
// Add weighted layer of the source image to the final Laplacian pyramid layer
if(weight_type_ == CV_32F)
for (int i = 0; i <= num_bands_; ++i)
{
for (int i = 0; i <= num_bands_; ++i)
Rect rc(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
#ifdef HAVE_OPENCL
if ( !cv::ocl::useOpenCL() ||
!ocl_MultiBandBlender_feed(src_pyr_laplace[i], weight_pyr_gauss[i],
dst_pyr_laplace_[i](rc), dst_band_weights_[i](rc)) )
#endif
{
for (int y = y_tl; y < y_br; ++y)
Mat _src_pyr_laplace = src_pyr_laplace[i].getMat(ACCESS_READ);
Mat _dst_pyr_laplace = dst_pyr_laplace_[i](rc).getMat(ACCESS_RW);
Mat _weight_pyr_gauss = weight_pyr_gauss[i].getMat(ACCESS_READ);
Mat _dst_band_weights = dst_band_weights_[i](rc).getMat(ACCESS_RW);
if(weight_type_ == CV_32F)
{
int y_ = y - y_tl;
const Point3_<short>* src_row = src_pyr_laplace[i].ptr<Point3_<short> >(y_);
Point3_<short>* dst_row = dst_pyr_laplace_[i].ptr<Point3_<short> >(y);
const float* weight_row = weight_pyr_gauss[i].ptr<float>(y_);
float* dst_weight_row = dst_band_weights_[i].ptr<float>(y);
for (int x = x_tl; x < x_br; ++x)
for (int y = 0; y < rc.height; ++y)
{
int x_ = x - x_tl;
dst_row[x].x += static_cast<short>(src_row[x_].x * weight_row[x_]);
dst_row[x].y += static_cast<short>(src_row[x_].y * weight_row[x_]);
dst_row[x].z += static_cast<short>(src_row[x_].z * weight_row[x_]);
dst_weight_row[x] += weight_row[x_];
const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
const float* weight_row = _weight_pyr_gauss.ptr<float>(y);
float* dst_weight_row = _dst_band_weights.ptr<float>(y);
for (int x = 0; x < rc.width; ++x)
{
dst_row[x].x += static_cast<short>(src_row[x].x * weight_row[x]);
dst_row[x].y += static_cast<short>(src_row[x].y * weight_row[x]);
dst_row[x].z += static_cast<short>(src_row[x].z * weight_row[x]);
dst_weight_row[x] += weight_row[x];
}
}
}
x_tl /= 2; y_tl /= 2;
x_br /= 2; y_br /= 2;
}
}
else// weight_type_ == CV_16S
{
for (int i = 0; i <= num_bands_; ++i)
{
for (int y = y_tl; y < y_br; ++y)
else // weight_type_ == CV_16S
{
int y_ = y - y_tl;
const Point3_<short>* src_row = src_pyr_laplace[i].ptr<Point3_<short> >(y_);
Point3_<short>* dst_row = dst_pyr_laplace_[i].ptr<Point3_<short> >(y);
const short* weight_row = weight_pyr_gauss[i].ptr<short>(y_);
short* dst_weight_row = dst_band_weights_[i].ptr<short>(y);
for (int x = x_tl; x < x_br; ++x)
for (int y = 0; y < y_br - y_tl; ++y)
{
int x_ = x - x_tl;
dst_row[x].x += short((src_row[x_].x * weight_row[x_]) >> 8);
dst_row[x].y += short((src_row[x_].y * weight_row[x_]) >> 8);
dst_row[x].z += short((src_row[x_].z * weight_row[x_]) >> 8);
dst_weight_row[x] += weight_row[x_];
const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
const short* weight_row = _weight_pyr_gauss.ptr<short>(y);
short* dst_weight_row = _dst_band_weights.ptr<short>(y);
for (int x = 0; x < x_br - x_tl; ++x)
{
dst_row[x].x += short((src_row[x].x * weight_row[x]) >> 8);
dst_row[x].y += short((src_row[x].y * weight_row[x]) >> 8);
dst_row[x].z += short((src_row[x].z * weight_row[x]) >> 8);
dst_weight_row[x] += weight_row[x];
}
}
}
x_tl /= 2; y_tl /= 2;
x_br /= 2; y_br /= 2;
}
x_tl /= 2; y_tl /= 2;
x_br /= 2; y_br /= 2;
}
LOGLN(" Add weighted layer of the source image to the final Laplacian pyramid layer, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
void MultiBandBlender::blend(Mat &dst, Mat &dst_mask)
void MultiBandBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
for (int i = 0; i <= num_bands_; ++i)
normalizeUsingWeightMap(dst_band_weights_[i], dst_pyr_laplace_[i]);
@ -366,10 +433,10 @@ void MultiBandBlender::blend(Mat &dst, Mat &dst_mask)
else
restoreImageFromLaplacePyr(dst_pyr_laplace_);
dst_ = dst_pyr_laplace_[0];
dst_ = dst_(Range(0, dst_roi_final_.height), Range(0, dst_roi_final_.width));
dst_mask_ = dst_band_weights_[0] > WEIGHT_EPS;
dst_mask_ = dst_mask_(Range(0, dst_roi_final_.height), Range(0, dst_roi_final_.width));
Rect dst_rc(0, 0, dst_roi_final_.width, dst_roi_final_.height);
dst_ = dst_pyr_laplace_[0](dst_rc);
UMat _dst_mask;
compare(dst_band_weights_[0](dst_rc), WEIGHT_EPS, dst_mask_, CMP_GT);
dst_pyr_laplace_.clear();
dst_band_weights_.clear();
@ -380,59 +447,92 @@ void MultiBandBlender::blend(Mat &dst, Mat &dst_mask)
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
void normalizeUsingWeightMap(const Mat& weight, Mat& src)
#ifdef HAVE_OPENCL
static bool ocl_normalizeUsingWeightMap(InputArray _weight, InputOutputArray _mat)
{
String buildOptions = "-D DEFINE_normalizeUsingWeightMap";
ocl::buildOptionsAddMatrixDescription(buildOptions, "mat", _mat);
ocl::buildOptionsAddMatrixDescription(buildOptions, "weight", _weight);
ocl::Kernel k("normalizeUsingWeightMap", ocl::stitching::multibandblend_oclsrc, buildOptions);
if (k.empty())
return false;
UMat mat = _mat.getUMat();
k.args(ocl::KernelArg::ReadWrite(mat),
ocl::KernelArg::ReadOnly(_weight.getUMat())
);
size_t globalsize[2] = {mat.cols, mat.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
void normalizeUsingWeightMap(InputArray _weight, InputOutputArray _src)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
if(tegra::normalizeUsingWeightMap(weight, src))
return;
#endif
CV_Assert(src.type() == CV_16SC3);
if(weight.type() == CV_32FC1)
#ifdef HAVE_OPENCL
if ( !cv::ocl::useOpenCL() ||
!ocl_normalizeUsingWeightMap(_weight, _src) )
#endif
{
for (int y = 0; y < src.rows; ++y)
{
Point3_<short> *row = src.ptr<Point3_<short> >(y);
const float *weight_row = weight.ptr<float>(y);
Mat weight = _weight.getMat();
Mat src = _src.getMat();
for (int x = 0; x < src.cols; ++x)
CV_Assert(src.type() == CV_16SC3);
if(weight.type() == CV_32FC1)
{
for (int y = 0; y < src.rows; ++y)
{
row[x].x = static_cast<short>(row[x].x / (weight_row[x] + WEIGHT_EPS));
row[x].y = static_cast<short>(row[x].y / (weight_row[x] + WEIGHT_EPS));
row[x].z = static_cast<short>(row[x].z / (weight_row[x] + WEIGHT_EPS));
Point3_<short> *row = src.ptr<Point3_<short> >(y);
const float *weight_row = weight.ptr<float>(y);
for (int x = 0; x < src.cols; ++x)
{
row[x].x = static_cast<short>(row[x].x / (weight_row[x] + WEIGHT_EPS));
row[x].y = static_cast<short>(row[x].y / (weight_row[x] + WEIGHT_EPS));
row[x].z = static_cast<short>(row[x].z / (weight_row[x] + WEIGHT_EPS));
}
}
}
}
else
{
CV_Assert(weight.type() == CV_16SC1);
for (int y = 0; y < src.rows; ++y)
else
{
const short *weight_row = weight.ptr<short>(y);
Point3_<short> *row = src.ptr<Point3_<short> >(y);
CV_Assert(weight.type() == CV_16SC1);
for (int x = 0; x < src.cols; ++x)
for (int y = 0; y < src.rows; ++y)
{
int w = weight_row[x] + 1;
row[x].x = static_cast<short>((row[x].x << 8) / w);
row[x].y = static_cast<short>((row[x].y << 8) / w);
row[x].z = static_cast<short>((row[x].z << 8) / w);
const short *weight_row = weight.ptr<short>(y);
Point3_<short> *row = src.ptr<Point3_<short> >(y);
for (int x = 0; x < src.cols; ++x)
{
int w = weight_row[x] + 1;
row[x].x = static_cast<short>((row[x].x << 8) / w);
row[x].y = static_cast<short>((row[x].y << 8) / w);
row[x].z = static_cast<short>((row[x].z << 8) / w);
}
}
}
}
}
void createWeightMap(const Mat &mask, float sharpness, Mat &weight)
void createWeightMap(InputArray mask, float sharpness, InputOutputArray weight)
{
CV_Assert(mask.type() == CV_8U);
distanceTransform(mask, weight, DIST_L1, 3);
threshold(weight * sharpness, weight, 1.f, 1.f, THRESH_TRUNC);
UMat tmp;
multiply(weight, sharpness, tmp);
threshold(tmp, weight, 1.f, 1.f, THRESH_TRUNC);
}
void createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat> &pyr)
void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
if(tegra::createLaplacePyr(img, num_levels, pyr))
@ -445,18 +545,18 @@ void createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat> &pyr)
{
if(num_levels == 0)
{
img.convertTo(pyr[0], CV_16S);
img.getUMat().convertTo(pyr[0], CV_16S);
return;
}
Mat downNext;
Mat current = img;
UMat downNext;
UMat current = img.getUMat();
pyrDown(img, downNext);
for(int i = 1; i < num_levels; ++i)
{
Mat lvl_up;
Mat lvl_down;
UMat lvl_up;
UMat lvl_down;
pyrDown(downNext, lvl_down);
pyrUp(downNext, lvl_up, current.size());
@ -467,7 +567,7 @@ void createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat> &pyr)
}
{
Mat lvl_up;
UMat lvl_up;
pyrUp(downNext, lvl_up, current.size());
subtract(current, lvl_up, pyr[num_levels-1], noArray(), CV_16S);
@ -476,10 +576,10 @@ void createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat> &pyr)
}
else
{
pyr[0] = img;
pyr[0] = img.getUMat();
for (int i = 0; i < num_levels; ++i)
pyrDown(pyr[i], pyr[i + 1]);
Mat tmp;
UMat tmp;
for (int i = 0; i < num_levels; ++i)
{
pyrUp(pyr[i + 1], tmp, pyr[i].size());
@ -489,7 +589,7 @@ void createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat> &pyr)
}
void createLaplacePyrGpu(const Mat &img, int num_levels, std::vector<Mat> &pyr)
void createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
pyr.resize(num_levels + 1);
@ -517,11 +617,11 @@ void createLaplacePyrGpu(const Mat &img, int num_levels, std::vector<Mat> &pyr)
}
void restoreImageFromLaplacePyr(std::vector<Mat> &pyr)
void restoreImageFromLaplacePyr(std::vector<UMat> &pyr)
{
if (pyr.empty())
return;
Mat tmp;
UMat tmp;
for (size_t i = pyr.size() - 1; i > 0; --i)
{
pyrUp(pyr[i], tmp, pyr[i - 1].size());
@ -530,7 +630,7 @@ void restoreImageFromLaplacePyr(std::vector<Mat> &pyr)
}
void restoreImageFromLaplacePyrGpu(std::vector<Mat> &pyr)
void restoreImageFromLaplacePyrGpu(std::vector<UMat> &pyr)
{
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
if (pyr.empty())

View File

@ -58,18 +58,18 @@ Ptr<ExposureCompensator> ExposureCompensator::createDefault(int type)
}
void ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
const std::vector<Mat> &masks)
void ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<UMat> &masks)
{
std::vector<std::pair<Mat,uchar> > level_masks;
std::vector<std::pair<UMat,uchar> > level_masks;
for (size_t i = 0; i < masks.size(); ++i)
level_masks.push_back(std::make_pair(masks[i], 255));
feed(corners, images, level_masks);
}
void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
const std::vector<std::pair<Mat,uchar> > &masks)
void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
{
LOGLN("Exposure compensation...");
#if ENABLE_LOG
@ -93,11 +93,11 @@ void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<
Rect roi;
if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))
{
subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i]));
subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j]));
subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);
subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
submask1 = masks[i].first(Rect(roi.tl() - corners[i], roi.br() - corners[i]));
submask2 = masks[j].first(Rect(roi.tl() - corners[j], roi.br() - corners[j]));
submask1 = masks[i].first(Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);
submask2 = masks[j].first(Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
intersect = (submask1 == masks[i].second) & (submask2 == masks[j].second);
N(i, j) = N(j, i) = std::max(1, countNonZero(intersect));
@ -145,9 +145,9 @@ void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<
}
void GainCompensator::apply(int index, Point /*corner*/, Mat &image, const Mat &/*mask*/)
void GainCompensator::apply(int index, Point /*corner*/, InputOutputArray image, InputArray /*mask*/)
{
image *= gains_(index, 0);
multiply(image, gains_(index, 0), image);
}
@ -160,8 +160,8 @@ std::vector<double> GainCompensator::gains() const
}
void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
const std::vector<std::pair<Mat,uchar> > &masks)
void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
{
CV_Assert(corners.size() == images.size() && images.size() == masks.size());
@ -169,8 +169,8 @@ void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::v
std::vector<Size> bl_per_imgs(num_images);
std::vector<Point> block_corners;
std::vector<Mat> block_images;
std::vector<std::pair<Mat,uchar> > block_masks;
std::vector<UMat> block_images;
std::vector<std::pair<UMat,uchar> > block_masks;
// Construct blocks for gain compensator
for (int img_idx = 0; img_idx < num_images; ++img_idx)
@ -208,11 +208,14 @@ void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::v
for (int img_idx = 0; img_idx < num_images; ++img_idx)
{
Size bl_per_img = bl_per_imgs[img_idx];
gain_maps_[img_idx].create(bl_per_img);
gain_maps_[img_idx].create(bl_per_img, CV_32F);
for (int by = 0; by < bl_per_img.height; ++by)
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
gain_maps_[img_idx](by, bx) = static_cast<float>(gains[bl_idx]);
{
Mat_<float> gain_map = gain_maps_[img_idx].getMat(ACCESS_WRITE);
for (int by = 0; by < bl_per_img.height; ++by)
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
gain_map(by, bx) = static_cast<float>(gains[bl_idx]);
}
sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker);
sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker);
@ -220,16 +223,18 @@ void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::v
}
void BlocksGainCompensator::apply(int index, Point /*corner*/, Mat &image, const Mat &/*mask*/)
void BlocksGainCompensator::apply(int index, Point /*corner*/, InputOutputArray _image, InputArray /*mask*/)
{
CV_Assert(image.type() == CV_8UC3);
CV_Assert(_image.type() == CV_8UC3);
Mat_<float> gain_map;
if (gain_maps_[index].size() == image.size())
gain_map = gain_maps_[index];
UMat u_gain_map;
if (gain_maps_[index].size() == _image.size())
u_gain_map = gain_maps_[index];
else
resize(gain_maps_[index], gain_map, image.size(), 0, 0, INTER_LINEAR);
resize(gain_maps_[index], u_gain_map, _image.size(), 0, 0, INTER_LINEAR);
Mat_<float> gain_map = u_gain_map.getMat(ACCESS_READ);
Mat image = _image.getMat();
for (int y = 0; y < image.rows; ++y)
{
const float* gain_row = gain_map.ptr<float>(y);

View File

@ -155,21 +155,31 @@ void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &feat
matches_info.matches.clear();
Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>();
Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>();
if (features2.descriptors.depth() == CV_8U)
Ptr<DescriptorMatcher> matcher;
#if 0 // TODO check this
if (ocl::useOpenCL())
{
indexParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
searchParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
matcher = makePtr<BFMatcher>((int)NORM_L2);
}
else
#endif
{
Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>();
Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>();
FlannBasedMatcher matcher(indexParams, searchParams);
if (features2.descriptors.depth() == CV_8U)
{
indexParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
searchParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
}
matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
}
std::vector< std::vector<DMatch> > pair_matches;
MatchesSet matches;
// Find 1->2 matches
matcher.knnMatch(features1.descriptors, features2.descriptors, pair_matches, 2);
matcher->knnMatch(features1.descriptors, features2.descriptors, pair_matches, 2);
for (size_t i = 0; i < pair_matches.size(); ++i)
{
if (pair_matches[i].size() < 2)
@ -186,7 +196,7 @@ void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &feat
// Find 2->1 matches
pair_matches.clear();
matcher.knnMatch(features2.descriptors, features1.descriptors, pair_matches, 2);
matcher->knnMatch(features2.descriptors, features1.descriptors, pair_matches, 2);
for (size_t i = 0; i < pair_matches.size(); ++i)
{
if (pair_matches[i].size() < 2)
@ -264,14 +274,14 @@ void GpuMatcher::collectGarbage()
namespace cv {
namespace detail {
void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features)
void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features)
{
find(image, features);
features.img_size = image.size();
}
void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features, const std::vector<Rect> &rois)
void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features, const std::vector<Rect> &rois)
{
std::vector<ImageFeatures> roi_features(rois.size());
size_t total_kps_count = 0;
@ -279,7 +289,7 @@ void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features, cons
for (size_t i = 0; i < rois.size(); ++i)
{
find(image(rois[i]), roi_features[i]);
find(image.getUMat()(rois[i]), roi_features[i]);
total_kps_count += roi_features[i].keypoints.size();
total_descriptors_height += roi_features[i].descriptors.rows;
}
@ -300,7 +310,7 @@ void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features, cons
features.keypoints[kp_idx].pt.x += (float)rois[i].x;
features.keypoints[kp_idx].pt.y += (float)rois[i].y;
}
Mat subdescr = features.descriptors.rowRange(
UMat subdescr = features.descriptors.rowRange(
descr_offset, descr_offset + roi_features[i].descriptors.rows);
roi_features[i].descriptors.copyTo(subdescr);
descr_offset += roi_features[i].descriptors.rows;
@ -337,9 +347,9 @@ SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int
}
}
void SurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
Mat gray_image;
UMat gray_image;
CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
if(image.type() == CV_8UC3)
{
@ -347,7 +357,7 @@ void SurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
}
else
{
gray_image = image;
gray_image = image.getUMat();
}
if (!surf)
{
@ -356,7 +366,7 @@ void SurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
}
else
{
Mat descriptors;
UMat descriptors;
(*surf)(gray_image, Mat(), features.keypoints, descriptors);
features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
}
@ -368,9 +378,9 @@ OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scal
orb = makePtr<ORB>(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
}
void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
Mat gray_image;
UMat gray_image;
CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC4) || (image.type() == CV_8UC1));
@ -379,7 +389,7 @@ void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
} else if (image.type() == CV_8UC4) {
cvtColor(image, gray_image, COLOR_BGRA2GRAY);
} else if (image.type() == CV_8UC1) {
gray_image=image;
gray_image = image.getUMat();
} else {
CV_Error(Error::StsUnsupportedFormat, "");
}
@ -392,7 +402,8 @@ void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
features.descriptors.release();
std::vector<KeyPoint> points;
Mat descriptors;
Mat _descriptors;
UMat descriptors;
for (int r = 0; r < grid_size.height; ++r)
for (int c = 0; c < grid_size.width; ++c)
@ -408,13 +419,13 @@ void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
// << " xl=" << xl << ", xr=" << xr << ", gray_image.data=" << ((size_t)gray_image.data) << ", "
// << "gray_image.dims=" << gray_image.dims << "\n");
Mat gray_image_part=gray_image(Range(yl, yr), Range(xl, xr));
UMat gray_image_part=gray_image(Range(yl, yr), Range(xl, xr));
// LOGLN("OrbFeaturesFinder::find: gray_image_part.empty=" << (gray_image_part.empty()?"true":"false") << ", "
// << " gray_image_part.size()=(" << gray_image_part.size().width << "x" << gray_image_part.size().height << "), "
// << " gray_image_part.dims=" << gray_image_part.dims << ", "
// << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");
(*orb)(gray_image_part, Mat(), points, descriptors);
(*orb)(gray_image_part, UMat(), points, descriptors);
features.keypoints.reserve(features.keypoints.size() + points.size());
for (std::vector<KeyPoint>::iterator kp = points.begin(); kp != points.end(); ++kp)
@ -423,8 +434,12 @@ void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
kp->pt.y += yl;
features.keypoints.push_back(*kp);
}
features.descriptors.push_back(descriptors);
_descriptors.push_back(descriptors.getMat(ACCESS_READ));
}
// TODO optimize copyTo()
//features.descriptors = _descriptors.getUMat(ACCESS_READ);
_descriptors.copyTo(features.descriptors);
}
}
@ -442,7 +457,7 @@ SurfFeaturesFinderGpu::SurfFeaturesFinderGpu(double hess_thresh, int num_octaves
}
void SurfFeaturesFinderGpu::find(const Mat &image, ImageFeatures &features)
void SurfFeaturesFinderGpu::find(InputArray image, ImageFeatures &features)
{
CV_Assert(image.depth() == CV_8U);
@ -499,12 +514,12 @@ const MatchesInfo& MatchesInfo::operator =(const MatchesInfo &other)
//////////////////////////////////////////////////////////////////////////////
void FeaturesMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
const Mat &mask)
const UMat &mask)
{
const int num_images = static_cast<int>(features.size());
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows));
Mat_<uchar> mask_(mask);
Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
if (mask_.empty())
mask_ = Mat::ones(num_images, num_images, CV_8U);

View File

@ -0,0 +1,282 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2014, Itseez, Inc, all rights reserved.
//
// Common preprocessors macro
//
//
// TODO: Move this common code into "header" file
//
#ifndef NL // New Line: for preprocessor debugging
#define NL
#endif
#define REF(x) x
#define __CAT(x, y) x##y
#define CAT(x, y) __CAT(x, y)
//
// All matrixes are come with this description ("name" is a name of matrix):
// * name_CN - number of channels (1,2,3,4)
// * name_DEPTH - numeric value of CV_MAT_DEPTH(type). See CV_8U, CV_32S, etc macro below.
//
// Currently we also pass these attributes (to reduce this macro block):
// * name_T - datatype (int, float, uchar4, float4)
// * name_T1 - datatype for one channel (int, float, uchar).
// It is equal to result of "T1(name_T)" macro
// * name_TSIZE - CV_ELEM_SIZE(type).
// We can't use sizeof(name_T) here, because sizeof(float3) is usually equal to 8, not 6.
// * name_T1SIZE - CV_ELEM_SIZE1(type)
//
//
// Usage sample:
//
// #define workType TYPE(float, src_CN)
// #define convertToWorkType CONVERT_TO(workType)
// #define convertWorkTypeToDstType CONVERT(workType, dst_T)
//
// __kernel void kernelFn(DECLARE_MAT_ARG(src), DECLARE_MAT_ARG(dst))
// {
// const int x = get_global_id(0);
// const int y = get_global_id(1);
//
// if (x < srcWidth && y < srcHeight)
// {
// int src_byteOffset = MAT_BYTE_OFFSET(src, x, y);
// int dst_byteOffset = MAT_BYTE_OFFSET(dst, x, y);
// workType value = convertToWorkType(LOAD_MAT_AT(src, src_byteOffset));
//
// ... value processing ...
//
// STORE_MAT_AT(dst, dst_byteOffset, convertWorkTypeToDstType(value));
// }
// }
//
// Expands to the kernel parameters describing one cv::Mat/UMat:
// data pointer, row stride in bytes, starting byte offset, and size.
// The per-name constants used below (name_T, name_T1, name_CN, name_TSIZE)
// are expected to be supplied via kernel build options.
#define DECLARE_MAT_ARG(name) \
__global uchar* restrict name ## Ptr, \
int name ## StepBytes, \
int name ## Offset, \
int name ## Height, \
int name ## Width NL
// Absolute byte offset of element (x, y), including the matrix start offset.
#define MAT_BYTE_OFFSET(name, x, y) mad24((y)/* + name ## OffsetY*/, name ## StepBytes, ((x)/* + name ## OffsetX*/) * (int)(name ## _TSIZE) + name ## Offset)
// Byte offset of element (x, y) relative to the matrix start (no base offset).
#define MAT_RELATIVE_BYTE_OFFSET(name, x, y) mad24(y, name ## StepBytes, (x) * (int)(name ## _TSIZE))
// Plain load: reinterpret the byte pointer as name_T and dereference.
#define __LOAD_MAT_AT(name, byteOffset) *((const __global name ## _T*)(name ## Ptr + (byteOffset)))
// Two-level expansion so name_CN is macro-expanded before token pasting.
#define __vload_CN__(name_cn) vload ## name_cn
#define __vload_CN_(name_cn) __vload_CN__(name_cn)
#define __vload_CN(name) __vload_CN_(name ## _CN)
// vloadN-based load: needed for 3-channel data, whose packed element size is
// 3 * sizeof(name_T1) while sizeof(name_T) (e.g. float3) is padded to 4 lanes
// (see the note at the top of this header).
#define __LOAD_MAT_AT_vload(name, byteOffset) __vload_CN(name)(0, ((const __global name ## _T1*)(name ## Ptr + (byteOffset))))
// Loader selection by channel count: only CN == 3 must go through vload3.
#define __LOAD_MAT_AT_1 __LOAD_MAT_AT
#define __LOAD_MAT_AT_2 __LOAD_MAT_AT
#define __LOAD_MAT_AT_3 __LOAD_MAT_AT_vload
#define __LOAD_MAT_AT_4 __LOAD_MAT_AT
#define __LOAD_MAT_AT_CN__(name_cn) __LOAD_MAT_AT_ ## name_cn
#define __LOAD_MAT_AT_CN_(name_cn) __LOAD_MAT_AT_CN__(name_cn)
#define __LOAD_MAT_AT_CN(name) __LOAD_MAT_AT_CN_(name ## _CN)
// Public load macro: LOAD_MAT_AT(src, byteOffset) -> value of type src_T.
#define LOAD_MAT_AT(name, byteOffset) __LOAD_MAT_AT_CN(name)(name, byteOffset)
// Plain store: reinterpret the byte pointer as name_T and assign.
#define __STORE_MAT_AT(name, byteOffset, v) *((__global name ## _T*)(name ## Ptr + (byteOffset))) = v
#define __vstore_CN__(name_cn) vstore ## name_cn
#define __vstore_CN_(name_cn) __vstore_CN__(name_cn)
#define __vstore_CN(name) __vstore_CN_(name ## _CN)
// vstoreN-based store: the 3-channel counterpart of __LOAD_MAT_AT_vload.
#define __STORE_MAT_AT_vstore(name, byteOffset, v) __vstore_CN(name)(v, 0, ((__global name ## _T1*)(name ## Ptr + (byteOffset))))
// Store selection by channel count: only CN == 3 must go through vstore3.
#define __STORE_MAT_AT_1 __STORE_MAT_AT
#define __STORE_MAT_AT_2 __STORE_MAT_AT
#define __STORE_MAT_AT_3 __STORE_MAT_AT_vstore
#define __STORE_MAT_AT_4 __STORE_MAT_AT
#define __STORE_MAT_AT_CN__(name_cn) __STORE_MAT_AT_ ## name_cn
#define __STORE_MAT_AT_CN_(name_cn) __STORE_MAT_AT_CN__(name_cn)
#define __STORE_MAT_AT_CN(name) __STORE_MAT_AT_CN_(name ## _CN)
// Public store macro: STORE_MAT_AT(dst, byteOffset, value).
#define STORE_MAT_AT(name, byteOffset, v) __STORE_MAT_AT_CN(name)(name, byteOffset, v)
// Scalar (base) type of each supported vector type: T1_<type> -> element type.
#define T1_uchar uchar
#define T1_uchar2 uchar
#define T1_uchar3 uchar
#define T1_uchar4 uchar
#define T1_char char
#define T1_char2 char
#define T1_char3 char
#define T1_char4 char
#define T1_ushort ushort
#define T1_ushort2 ushort
#define T1_ushort3 ushort
#define T1_ushort4 ushort
#define T1_short short
#define T1_short2 short
#define T1_short3 short
#define T1_short4 short
#define T1_int int
#define T1_int2 int
#define T1_int3 int
#define T1_int4 int
#define T1_float float
#define T1_float2 float
#define T1_float3 float
#define T1_float4 float
#define T1_double double
#define T1_double2 double
#define T1_double3 double
#define T1_double4 double
// T1(type): base scalar type of `type` (REF/CAT are expansion helpers
// defined elsewhere in this header — presumably indirection + token paste).
#define T1(type) REF(CAT(T1_, REF(type)))
// Aliases so TYPE(t, 1) resolves to the plain scalar type.
#define uchar1 uchar
#define char1 char
#define short1 short
#define ushort1 ushort
#define int1 int
#define float1 float
#define double1 double
// TYPE(type, cn): the cn-channel vector type built from scalar `type`.
#define TYPE(type, cn) REF(CAT(REF(type), REF(cn)))
// Conversion-mode table: __CONVERT_MODE_<src>_<dst> selects between
// no-op, plain convert, saturating convert, and round-to-nearest-even
// (float/double sources to integer destinations use _sat_rte / _rte).
#define __CONVERT_MODE_uchar_uchar __NO_CONVERT
#define __CONVERT_MODE_uchar_char __CONVERT_sat
#define __CONVERT_MODE_uchar_ushort __CONVERT
#define __CONVERT_MODE_uchar_short __CONVERT
#define __CONVERT_MODE_uchar_int __CONVERT
#define __CONVERT_MODE_uchar_float __CONVERT
#define __CONVERT_MODE_uchar_double __CONVERT
#define __CONVERT_MODE_char_uchar __CONVERT_sat
#define __CONVERT_MODE_char_char __NO_CONVERT
#define __CONVERT_MODE_char_ushort __CONVERT_sat
#define __CONVERT_MODE_char_short __CONVERT
#define __CONVERT_MODE_char_int __CONVERT
#define __CONVERT_MODE_char_float __CONVERT
#define __CONVERT_MODE_char_double __CONVERT
#define __CONVERT_MODE_ushort_uchar __CONVERT_sat
#define __CONVERT_MODE_ushort_char __CONVERT_sat
#define __CONVERT_MODE_ushort_ushort __NO_CONVERT
#define __CONVERT_MODE_ushort_short __CONVERT_sat
#define __CONVERT_MODE_ushort_int __CONVERT
#define __CONVERT_MODE_ushort_float __CONVERT
#define __CONVERT_MODE_ushort_double __CONVERT
#define __CONVERT_MODE_short_uchar __CONVERT_sat
#define __CONVERT_MODE_short_char __CONVERT_sat
#define __CONVERT_MODE_short_ushort __CONVERT_sat
#define __CONVERT_MODE_short_short __NO_CONVERT
#define __CONVERT_MODE_short_int __CONVERT
#define __CONVERT_MODE_short_float __CONVERT
#define __CONVERT_MODE_short_double __CONVERT
#define __CONVERT_MODE_int_uchar __CONVERT_sat
#define __CONVERT_MODE_int_char __CONVERT_sat
#define __CONVERT_MODE_int_ushort __CONVERT_sat
#define __CONVERT_MODE_int_short __CONVERT_sat
#define __CONVERT_MODE_int_int __NO_CONVERT
#define __CONVERT_MODE_int_float __CONVERT
#define __CONVERT_MODE_int_double __CONVERT
#define __CONVERT_MODE_float_uchar __CONVERT_sat_rte
#define __CONVERT_MODE_float_char __CONVERT_sat_rte
#define __CONVERT_MODE_float_ushort __CONVERT_sat_rte
#define __CONVERT_MODE_float_short __CONVERT_sat_rte
#define __CONVERT_MODE_float_int __CONVERT_rte
#define __CONVERT_MODE_float_float __NO_CONVERT
#define __CONVERT_MODE_float_double __CONVERT
#define __CONVERT_MODE_double_uchar __CONVERT_sat_rte
#define __CONVERT_MODE_double_char __CONVERT_sat_rte
#define __CONVERT_MODE_double_ushort __CONVERT_sat_rte
#define __CONVERT_MODE_double_short __CONVERT_sat_rte
#define __CONVERT_MODE_double_int __CONVERT_rte
#define __CONVERT_MODE_double_float __CONVERT
#define __CONVERT_MODE_double_double __NO_CONVERT
// Mode lookup keyed by the two base (T1) types of src and dst.
#define __CONVERT_MODE(srcType, dstType) CAT(__CONVERT_MODE_, CAT(REF(T1(srcType)), CAT(_, REF(T1(dstType)))))
// Rounding-suffix table: maps a conversion mode to the OpenCL convert_
// suffix ("", _rte, _sat, _sat_rte).
#define __ROUND_MODE__NO_CONVERT
#define __ROUND_MODE__CONVERT // nothing
#define __ROUND_MODE__CONVERT_rte _rte
#define __ROUND_MODE__CONVERT_sat _sat
#define __ROUND_MODE__CONVERT_sat_rte _sat_rte
#define ROUND_MODE(srcType, dstType) CAT(__ROUND_MODE_, __CONVERT_MODE(srcType, dstType))
// Builds the OpenCL built-in name convert_<dstType><roundMode>.
#define __CONVERT_ROUND(dstType, roundMode) CAT(CAT(convert_, REF(dstType)), roundMode)
#define __NO_CONVERT(dstType) // nothing
#define __CONVERT(dstType) __CONVERT_ROUND(dstType,)
#define __CONVERT_rte(dstType) __CONVERT_ROUND(dstType,_rte)
#define __CONVERT_sat(dstType) __CONVERT_ROUND(dstType,_sat)
#define __CONVERT_sat_rte(dstType) __CONVERT_ROUND(dstType,_sat_rte)
// CONVERT picks the table-selected mode; CONVERT_TO forces a plain convert.
#define CONVERT(srcType, dstType) REF(__CONVERT_MODE(srcType,dstType))(dstType)
#define CONVERT_TO(dstType) __CONVERT_ROUND(dstType,)
// OpenCV depths
// Kept in sync with opencv2/core/hal/interface (CV_8U..CV_64F) so that
// depth comparisons like `weight_DEPTH >= CV_32F` work inside kernels.
#define CV_8U 0
#define CV_8S 1
#define CV_16U 2
#define CV_16S 3
#define CV_32S 4
#define CV_32F 5
#define CV_64F 6
//
// End of common preprocessors macro
//
#if defined(DEFINE_feed)
// Accumulation type: the weight's base type widened to the source channel count.
#define workType TYPE(weight_T1, src_CN)
#define convertSrcToWorkType CONVERT_TO(workType)
#define convertToDstType CONVERT_TO(dst_T) // sat_rte provides incompatible results with CPU path
// Blender feed step: per pixel, dst += src * weight and dstWeight += weight.
// One work-item per source pixel; launched over (srcWidth, srcHeight).
__kernel void feed(
DECLARE_MAT_ARG(src), DECLARE_MAT_ARG(weight),
DECLARE_MAT_ARG(dst), DECLARE_MAT_ARG(dstWeight)
)
{
const int x = get_global_id(0);
const int y = get_global_id(1);
if (x < srcWidth && y < srcHeight)
{
// Byte offsets of this pixel in each matrix (strides/offsets may differ).
int src_byteOffset = MAT_BYTE_OFFSET(src, x, y);
int weight_byteOffset = MAT_BYTE_OFFSET(weight, x, y);
int dst_byteOffset = MAT_BYTE_OFFSET(dst, x, y);
int dstWeight_byteOffset = MAT_BYTE_OFFSET(dstWeight, x, y);
weight_T w = LOAD_MAT_AT(weight, weight_byteOffset);
workType src_value = convertSrcToWorkType(LOAD_MAT_AT(src, src_byteOffset));
// Accumulate the weighted source pixel and the weight itself.
STORE_MAT_AT(dst, dst_byteOffset, LOAD_MAT_AT(dst, dst_byteOffset) + convertToDstType(src_value * w));
STORE_MAT_AT(dstWeight, dstWeight_byteOffset, LOAD_MAT_AT(dstWeight, dstWeight_byteOffset) + w);
}
}
#endif
#if defined(DEFINE_normalizeUsingWeightMap)
// Working type: the weight's base type widened to the image channel count.
#define workType TYPE(weight_T1, mat_CN)
#define convertSrcToWorkType CONVERT_TO(workType)
#define convertToDstType CONVERT_TO(mat_T) // sat_rte provides incompatible results with CPU path
// Epsilon guards the division below against zero weights; only meaningful
// for floating-point weight maps (integer weights use exact 0).
#if weight_DEPTH >= CV_32F
#define WEIGHT_EPS 1e-5f
#else
#define WEIGHT_EPS 0
#endif
// In-place normalization: mat(x, y) /= weight(x, y) + WEIGHT_EPS.
// One work-item per pixel; launched over (matWidth, matHeight).
__kernel void normalizeUsingWeightMap(
DECLARE_MAT_ARG(mat), DECLARE_MAT_ARG(weight)
)
{
const int x = get_global_id(0);
const int y = get_global_id(1);
if (x < matWidth && y < matHeight)
{
int mat_byteOffset = MAT_BYTE_OFFSET(mat, x, y);
int weight_byteOffset = MAT_BYTE_OFFSET(weight, x, y);
weight_T w = LOAD_MAT_AT(weight, weight_byteOffset);
workType value = convertSrcToWorkType(LOAD_MAT_AT(mat, mat_byteOffset));
value = value / (w + WEIGHT_EPS);
// Write back in the image's own element type.
STORE_MAT_AT(mat, mat_byteOffset, convertToDstType(value));
}
}
#endif

View File

@ -51,6 +51,7 @@
#include <set>
#include <functional>
#include <sstream>
#include <iostream>
#include <cmath>
#include "opencv2/core.hpp"
#include "opencv2/core/ocl.hpp"

View File

@ -46,8 +46,8 @@
namespace cv {
namespace detail {
void PairwiseSeamFinder::find(const std::vector<Mat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks)
void PairwiseSeamFinder::find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks)
{
LOGLN("Finding seams...");
if (src.size() == 0)
@ -84,7 +84,7 @@ void PairwiseSeamFinder::run()
void VoronoiSeamFinder::find(const std::vector<Size> &sizes, const std::vector<Point> &corners,
std::vector<Mat> &masks)
std::vector<UMat> &masks)
{
LOGLN("Finding seams...");
if (sizes.size() == 0)
@ -110,7 +110,7 @@ void VoronoiSeamFinder::findInPair(size_t first, size_t second, Rect roi)
Mat submask2(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);
Size img1 = sizes_[first], img2 = sizes_[second];
Mat mask1 = masks_[first], mask2 = masks_[second];
Mat mask1 = masks_[first].getMat(ACCESS_READ), mask2 = masks_[second].getMat(ACCESS_READ);
Point tl1 = corners_[first], tl2 = corners_[second];
// Cut submasks with some gap
@ -160,7 +160,7 @@ void VoronoiSeamFinder::findInPair(size_t first, size_t second, Rect roi)
DpSeamFinder::DpSeamFinder(CostFunction costFunc) : costFunc_(costFunc) {}
void DpSeamFinder::find(const std::vector<Mat> &src, const std::vector<Point> &corners, std::vector<Mat> &masks)
void DpSeamFinder::find(const std::vector<UMat> &src, const std::vector<Point> &corners, std::vector<UMat> &masks)
{
LOGLN("Finding seams...");
#if ENABLE_LOG
@ -176,13 +176,18 @@ void DpSeamFinder::find(const std::vector<Mat> &src, const std::vector<Point> &c
for (size_t j = i+1; j < src.size(); ++j)
pairs.push_back(std::make_pair(i, j));
sort(pairs.begin(), pairs.end(), ImagePairLess(src, corners));
{
std::vector<Mat> _src(src.size());
for (size_t i = 0; i < src.size(); ++i) _src[i] = src[i].getMat(ACCESS_READ);
sort(pairs.begin(), pairs.end(), ImagePairLess(_src, corners));
}
std::reverse(pairs.begin(), pairs.end());
for (size_t i = 0; i < pairs.size(); ++i)
{
size_t i0 = pairs[i].first, i1 = pairs[i].second;
process(src[i0], src[i1], corners[i0], corners[i1], masks[i0], masks[i1]);
Mat mask0 = masks[i0].getMat(ACCESS_RW), mask1 = masks[i1].getMat(ACCESS_RW);
process(src[i0].getMat(ACCESS_READ), src[i1].getMat(ACCESS_READ), corners[i0], corners[i1], mask0, mask1);
}
LOGLN("Finding seams, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
@ -1055,7 +1060,7 @@ public:
~Impl() {}
void find(const std::vector<Mat> &src, const std::vector<Point> &corners, std::vector<Mat> &masks);
void find(const std::vector<UMat> &src, const std::vector<Point> &corners, std::vector<UMat> &masks);
void findInPair(size_t first, size_t second, Rect roi);
private:
@ -1072,8 +1077,8 @@ private:
};
void GraphCutSeamFinder::Impl::find(const std::vector<Mat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks)
void GraphCutSeamFinder::Impl::find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks)
{
// Compute gradients
dx_.resize(src.size());
@ -1207,10 +1212,10 @@ void GraphCutSeamFinder::Impl::setGraphWeightsColorGrad(
void GraphCutSeamFinder::Impl::findInPair(size_t first, size_t second, Rect roi)
{
Mat img1 = images_[first], img2 = images_[second];
Mat img1 = images_[first].getMat(ACCESS_READ), img2 = images_[second].getMat(ACCESS_READ);
Mat dx1 = dx_[first], dx2 = dx_[second];
Mat dy1 = dy_[first], dy2 = dy_[second];
Mat mask1 = masks_[first], mask2 = masks_[second];
Mat mask1 = masks_[first].getMat(ACCESS_RW), mask2 = masks_[second].getMat(ACCESS_RW);
Point tl1 = corners_[first], tl2 = corners_[second];
const int gap = 10;
@ -1309,16 +1314,16 @@ GraphCutSeamFinder::GraphCutSeamFinder(int cost_type, float terminal_cost, float
GraphCutSeamFinder::~GraphCutSeamFinder() {}
void GraphCutSeamFinder::find(const std::vector<Mat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks)
void GraphCutSeamFinder::find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks)
{
impl_->find(src, corners, masks);
}
#ifdef HAVE_OPENCV_CUDA
void GraphCutSeamFinderGpu::find(const std::vector<Mat> &src, const std::vector<Point> &corners,
std::vector<Mat> &masks)
void GraphCutSeamFinderGpu::find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks)
{
// Compute gradients
dx_.resize(src.size());
@ -1350,10 +1355,10 @@ void GraphCutSeamFinderGpu::find(const std::vector<Mat> &src, const std::vector<
void GraphCutSeamFinderGpu::findInPair(size_t first, size_t second, Rect roi)
{
Mat img1 = images_[first], img2 = images_[second];
Mat img1 = images_[first].getMat(ACCESS_READ), img2 = images_[second].getMat(ACCESS_READ);
Mat dx1 = dx_[first], dx2 = dx_[second];
Mat dy1 = dy_[first], dy2 = dy_[second];
Mat mask1 = masks_[first], mask2 = masks_[second];
Mat mask1 = masks_[first].getMat(ACCESS_READ), mask2 = masks_[second].getMat(ACCESS_READ);
Point tl1 = corners_[first], tl2 = corners_[second];
const int gap = 10;

View File

@ -86,15 +86,15 @@ Stitcher Stitcher::createDefault(bool try_use_gpu)
}
Stitcher::Status Stitcher::estimateTransform(InputArray images)
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images)
{
return estimateTransform(images, std::vector<std::vector<Rect> >());
}
Stitcher::Status Stitcher::estimateTransform(InputArray images, const std::vector<std::vector<Rect> > &rois)
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois)
{
images.getMatVector(imgs_);
images.getUMatVector(imgs_);
rois_ = rois;
Status status;
@ -112,21 +112,21 @@ Stitcher::Status Stitcher::estimateTransform(InputArray images, const std::vecto
Stitcher::Status Stitcher::composePanorama(OutputArray pano)
{
return composePanorama(std::vector<Mat>(), pano);
return composePanorama(std::vector<UMat>(), pano);
}
Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
Stitcher::Status Stitcher::composePanorama(InputArrayOfArrays images, OutputArray pano)
{
LOGLN("Warping images (auxiliary)... ");
std::vector<Mat> imgs;
images.getMatVector(imgs);
std::vector<UMat> imgs;
images.getUMatVector(imgs);
if (!imgs.empty())
{
CV_Assert(imgs.size() == imgs_.size());
Mat img;
UMat img;
seam_est_imgs_.resize(imgs.size());
for (size_t i = 0; i < imgs.size(); ++i)
@ -136,8 +136,8 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
seam_est_imgs_[i] = img.clone();
}
std::vector<Mat> seam_est_imgs_subset;
std::vector<Mat> imgs_subset;
std::vector<UMat> seam_est_imgs_subset;
std::vector<UMat> imgs_subset;
for (size_t i = 0; i < indices_.size(); ++i)
{
@ -149,17 +149,17 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
imgs_ = imgs_subset;
}
Mat &pano_ = pano.getMatRef();
UMat pano_;
#if ENABLE_LOG
int64 t = getTickCount();
#endif
std::vector<Point> corners(imgs_.size());
std::vector<Mat> masks_warped(imgs_.size());
std::vector<Mat> images_warped(imgs_.size());
std::vector<UMat> masks_warped(imgs_.size());
std::vector<UMat> images_warped(imgs_.size());
std::vector<Size> sizes(imgs_.size());
std::vector<Mat> masks(imgs_.size());
std::vector<UMat> masks(imgs_.size());
// Prepare image masks
for (size_t i = 0; i < imgs_.size(); ++i)
@ -179,13 +179,13 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
K(1,1) *= (float)seam_work_aspect_;
K(1,2) *= (float)seam_work_aspect_;
corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_CONSTANT, images_warped[i]);
sizes[i] = images_warped[i].size();
w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
}
std::vector<Mat> images_warped_f(imgs_.size());
std::vector<UMat> images_warped_f(imgs_.size());
for (size_t i = 0; i < imgs_.size(); ++i)
images_warped[i].convertTo(images_warped_f[i], CV_32F);
@ -206,8 +206,8 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
t = getTickCount();
#endif
Mat img_warped, img_warped_s;
Mat dilated_mask, seam_mask, mask, mask_warped;
UMat img_warped, img_warped_s;
UMat dilated_mask, seam_mask, mask, mask_warped;
//double compose_seam_aspect = 1;
double compose_work_aspect = 1;
@ -216,10 +216,13 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
double compose_scale = 1;
bool is_compose_scale_set = false;
Mat full_img, img;
UMat full_img, img;
for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
{
LOGLN("Compositing image #" << indices_[img_idx] + 1);
#if ENABLE_LOG
int64 compositing_t = getTickCount();
#endif
// Read image and resize it if necessary
full_img = imgs_[img_idx];
@ -261,25 +264,48 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
}
}
if (std::abs(compose_scale - 1) > 1e-1)
{
#if ENABLE_LOG
int64 resize_t = getTickCount();
#endif
resize(full_img, img, Size(), compose_scale, compose_scale);
LOGLN(" resize time: " << ((getTickCount() - resize_t) / getTickFrequency()) << " sec");
}
else
img = full_img;
full_img.release();
Size img_size = img.size();
LOGLN(" after resize time: " << ((getTickCount() - compositing_t) / getTickFrequency()) << " sec");
Mat K;
cameras_[img_idx].K().convertTo(K, CV_32F);
#if ENABLE_LOG
int64 pt = getTickCount();
#endif
// Warp the current image
w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_CONSTANT, img_warped);
LOGLN(" warp the current image: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
pt = getTickCount();
#endif
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
LOGLN(" warp the current image mask: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
pt = getTickCount();
#endif
// Compensate exposure
exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);
LOGLN(" compensate exposure: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
pt = getTickCount();
#endif
img_warped.convertTo(img_warped_s, CV_16S);
img_warped.release();
@ -290,7 +316,12 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
dilate(masks_warped[img_idx], dilated_mask, Mat());
resize(dilated_mask, seam_mask, mask_warped.size());
mask_warped = seam_mask & mask_warped;
bitwise_and(seam_mask, mask_warped, mask_warped);
LOGLN(" other: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
pt = getTickCount();
#endif
if (!is_blender_prepared)
{
@ -298,24 +329,36 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
is_blender_prepared = true;
}
LOGLN(" other2: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
LOGLN(" feed...");
#if ENABLE_LOG
int64 feed_t = getTickCount();
#endif
// Blend the current image
blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
LOGLN(" feed time: " << ((getTickCount() - feed_t) / getTickFrequency()) << " sec");
LOGLN("Compositing ## time: " << ((getTickCount() - compositing_t) / getTickFrequency()) << " sec");
}
Mat result, result_mask;
#if ENABLE_LOG
int64 blend_t = getTickCount();
#endif
UMat result, result_mask;
blender_->blend(result, result_mask);
LOGLN("blend time: " << ((getTickCount() - blend_t) / getTickFrequency()) << " sec");
LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
// Preliminary result is in CV_16SC3 format, but all values are in [0,255] range,
// so convert it to avoid user confusing
result.convertTo(pano_, CV_8U);
result.convertTo(pano, CV_8U);
return OK;
}
Stitcher::Status Stitcher::stitch(InputArray images, OutputArray pano)
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, OutputArray pano)
{
Status status = estimateTransform(images);
if (status != OK)
@ -324,7 +367,7 @@ Stitcher::Status Stitcher::stitch(InputArray images, OutputArray pano)
}
Stitcher::Status Stitcher::stitch(InputArray images, const std::vector<std::vector<Rect> > &rois, OutputArray pano)
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois, OutputArray pano)
{
Status status = estimateTransform(images, rois);
if (status != OK)
@ -346,7 +389,7 @@ Stitcher::Status Stitcher::matchImages()
seam_scale_ = 1;
bool is_work_scale_set = false;
bool is_seam_scale_set = false;
Mat full_img, img;
UMat full_img, img;
features_.resize(imgs_.size());
seam_est_imgs_.resize(imgs_.size());
full_img_sizes_.resize(imgs_.size());
@ -420,8 +463,8 @@ Stitcher::Status Stitcher::matchImages()
// Leave only images we are sure are from the same panorama
indices_ = detail::leaveBiggestComponent(features_, pairwise_matches_, (float)conf_thresh_);
std::vector<Mat> seam_est_imgs_subset;
std::vector<Mat> imgs_subset;
std::vector<UMat> seam_est_imgs_subset;
std::vector<UMat> imgs_subset;
std::vector<Size> full_img_sizes_subset;
for (size_t i = 0; i < indices_.size(); ++i)
{
@ -454,7 +497,7 @@ Stitcher::Status Stitcher::estimateCameraParams()
Mat R;
cameras_[i].R.convertTo(R, CV_32F);
cameras_[i].R = R;
LOGLN("Initial intrinsic parameters #" << indices_[i] + 1 << ":\n " << cameras_[i].K());
//LOGLN("Initial intrinsic parameters #" << indices_[i] + 1 << ":\n " << cameras_[i].K());
}
bundle_adjuster_->setConfThresh(conf_thresh_);
@ -465,7 +508,7 @@ Stitcher::Status Stitcher::estimateCameraParams()
std::vector<double> focals;
for (size_t i = 0; i < cameras_.size(); ++i)
{
LOGLN("Camera #" << indices_[i] + 1 << ":\n" << cameras_[i].K());
//LOGLN("Camera #" << indices_[i] + 1 << ":\n" << cameras_[i].K());
focals.push_back(cameras_[i].focal);
}

View File

@ -113,7 +113,7 @@ bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi)
}
Rect resultRoi(const std::vector<Point> &corners, const std::vector<Mat> &images)
Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images)
{
std::vector<Size> sizes(images.size());
for (size_t i = 0; i < images.size(); ++i)

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencl_kernels.hpp"
namespace cv {
namespace detail {
@ -86,7 +87,6 @@ Point2f PlaneWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R, In
return uv;
}
Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray _xmap, OutputArray _ymap)
{
projector_.setCameraParams(K, R, T);
@ -94,8 +94,29 @@ Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, InputArra
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
_xmap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
_ymap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
_xmap.create(dsize, CV_32FC1);
_ymap.create(dsize, CV_32FC1);
if (ocl::useOpenCL())
{
ocl::Kernel k("buildWarpPlaneMaps", ocl::stitching::warpers_oclsrc);
if (!k.empty())
{
Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv), t(1, 3, CV_32FC1, projector_.t);
UMat uxmap = _xmap.getUMat(), uymap = _ymap.getUMat(),
uk_rinv = k_rinv.getUMat(ACCESS_READ), ut = t.getUMat(ACCESS_READ);
k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
ocl::KernelArg::PtrReadOnly(uk_rinv), ocl::KernelArg::PtrReadOnly(ut),
dst_tl.x, dst_tl.y, projector_.scale);
size_t globalsize[2] = { dsize.width, dsize.height };
if (k.run(2, globalsize, NULL, true))
return Rect(dst_tl, dst_br);
}
}
Mat xmap = _xmap.getMat(), ymap = _ymap.getMat();
@ -117,11 +138,11 @@ Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, InputArra
Point PlaneWarper::warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
OutputArray dst)
{
Mat xmap, ymap;
Rect dst_roi = buildMaps(src.size(), K, R, T, xmap, ymap);
UMat uxmap, uymap;
Rect dst_roi = buildMaps(src.size(), K, R, T, uxmap, uymap);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
remap(src, dst, xmap, ymap, interp_mode, border_mode);
remap(src, dst, uxmap, uymap, interp_mode, border_mode);
return dst_roi.tl();
}
@ -341,5 +362,93 @@ void SphericalPortraitWarper::detectResultRoi(Size src_size, Point &dst_tl, Poin
dst_br.y = static_cast<int>(br_vf);
}
/////////////////////////////////////////// SphericalWarper ////////////////////////////////////////
/** Builds per-pixel remap tables (CV_32FC1 xmap/ymap) for spherical projection.
 *
 *  Tries the OpenCL kernel "buildWarpSphericalMaps" first; on any failure
 *  (OpenCL unavailable, kernel not built, enqueue failure) it falls back to
 *  the generic CPU implementation in RotationWarperBase.
 *
 *  @param src_size size of the image to be warped
 *  @param K        camera intrinsic matrix (converted to the projector state)
 *  @param R        camera rotation matrix
 *  @param xmap     output x-coordinate map for cv::remap
 *  @param ymap     output y-coordinate map for cv::remap
 *  @return bounding rectangle of the warped image in destination coordinates
 */
Rect SphericalWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
    if (ocl::useOpenCL())
    {
        ocl::Kernel k("buildWarpSphericalMaps", ocl::stitching::warpers_oclsrc);
        if (!k.empty())
        {
            projector_.setCameraParams(K, R);

            Point dst_tl, dst_br;
            detectResultRoi(src_size, dst_tl, dst_br);

            Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
            xmap.create(dsize, CV_32FC1);
            ymap.create(dsize, CV_32FC1);

            // k_rinv wraps projector_.k_rinv without copying; it must stay
            // alive until the (blocking) k.run below completes.
            Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv);
            UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ);

            k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
                   ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, projector_.scale);

            // Explicit casts: brace-initializing size_t from int is a
            // narrowing conversion (ill-formed in C++11 list-initialization).
            size_t globalsize[2] = { (size_t)dsize.width, (size_t)dsize.height };
            if (k.run(2, globalsize, NULL, true))
                return Rect(dst_tl, dst_br);
        }
    }

    // CPU fallback (also re-sets the camera parameters internally).
    return RotationWarperBase<SphericalProjector>::buildMaps(src_size, K, R, xmap, ymap);
}
// Warps the source image with the spherical projection: builds the remap
// tables (OpenCL-accelerated when possible), sizes the destination, and
// remaps. Returns the top-left corner of the warped image's bounding box.
Point SphericalWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
    UMat xmap_u, ymap_u;
    const Rect dst_roi = buildMaps(src.size(), K, R, xmap_u, ymap_u);

    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    remap(src, dst, xmap_u, ymap_u, interp_mode, border_mode);

    return dst_roi.tl();
}
/////////////////////////////////////////// CylindricalWarper ////////////////////////////////////////
/** Builds per-pixel remap tables (CV_32FC1 xmap/ymap) for cylindrical projection.
 *
 *  Tries the OpenCL kernel "buildWarpCylindricalMaps" first; on any failure
 *  it falls back to the generic CPU implementation in RotationWarperBase.
 *
 *  @param src_size size of the image to be warped
 *  @param K        camera intrinsic matrix (converted to the projector state)
 *  @param R        camera rotation matrix
 *  @param xmap     output x-coordinate map for cv::remap
 *  @param ymap     output y-coordinate map for cv::remap
 *  @return bounding rectangle of the warped image in destination coordinates
 */
Rect CylindricalWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
    if (ocl::useOpenCL())
    {
        ocl::Kernel k("buildWarpCylindricalMaps", ocl::stitching::warpers_oclsrc);
        if (!k.empty())
        {
            projector_.setCameraParams(K, R);

            Point dst_tl, dst_br;
            detectResultRoi(src_size, dst_tl, dst_br);

            Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
            xmap.create(dsize, CV_32FC1);
            ymap.create(dsize, CV_32FC1);

            // k_rinv wraps projector_.k_rinv without copying; it must stay
            // alive until the (blocking) k.run below completes.
            Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv);
            UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ);

            k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
                   ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, projector_.scale);

            // Explicit casts: brace-initializing size_t from int is a
            // narrowing conversion (ill-formed in C++11 list-initialization).
            size_t globalsize[2] = { (size_t)dsize.width, (size_t)dsize.height };
            if (k.run(2, globalsize, NULL, true))
                return Rect(dst_tl, dst_br);
        }
    }

    // CPU fallback (also re-sets the camera parameters internally).
    return RotationWarperBase<CylindricalProjector>::buildMaps(src_size, K, R, xmap, ymap);
}
// Warps the source image with the cylindrical projection: builds the remap
// tables (OpenCL-accelerated when possible), sizes the destination, and
// remaps. Returns the top-left corner of the warped image's bounding box.
Point CylindricalWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
    UMat xmap_u, ymap_u;
    const Rect dst_roi = buildMaps(src.size(), K, R, xmap_u, ymap_u);

    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    remap(src, dst, xmap_u, ymap_u, interp_mode, border_mode);

    return dst_roi.tl();
}
} // namespace detail
} // namespace cv

View File

@ -1,187 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels.hpp"
namespace cv {
namespace detail {
/////////////////////////////////////////// PlaneWarperOcl ////////////////////////////////////////////
/** Builds per-pixel remap tables (CV_32FC1 xmap/ymap) for plane projection
 *  with translation.
 *
 *  Tries the OpenCL kernel "buildWarpPlaneMaps" first; on any failure it
 *  falls back to the CPU implementation in PlaneWarper.
 *
 *  @param src_size size of the image to be warped
 *  @param K        camera intrinsic matrix
 *  @param R        camera rotation matrix
 *  @param T        camera translation vector
 *  @param xmap     output x-coordinate map for cv::remap
 *  @param ymap     output y-coordinate map for cv::remap
 *  @return bounding rectangle of the warped image in destination coordinates
 */
Rect PlaneWarperOcl::buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap)
{
    projector_.setCameraParams(K, R, T);

    Point dst_tl, dst_br;
    detectResultRoi(src_size, dst_tl, dst_br);

    if (ocl::useOpenCL())
    {
        ocl::Kernel k("buildWarpPlaneMaps", ocl::stitching::warpers_oclsrc);
        if (!k.empty())
        {
            Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
            xmap.create(dsize, CV_32FC1);
            ymap.create(dsize, CV_32FC1);

            // k_rinv / t wrap the projector's buffers without copying; they
            // must stay alive until the (blocking) k.run below completes.
            Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv), t(1, 3, CV_32FC1, projector_.t);
            UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(),
                uk_rinv = k_rinv.getUMat(ACCESS_READ), ut = t.getUMat(ACCESS_READ);

            k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
                   ocl::KernelArg::PtrReadOnly(uk_rinv), ocl::KernelArg::PtrReadOnly(ut),
                   dst_tl.x, dst_tl.y, projector_.scale);

            // Explicit casts: brace-initializing size_t from int is a
            // narrowing conversion (ill-formed in C++11 list-initialization).
            size_t globalsize[2] = { (size_t)dsize.width, (size_t)dsize.height };
            if (k.run(2, globalsize, NULL, true))
                return Rect(dst_tl, dst_br);
        }
    }

    // CPU fallback.
    return PlaneWarper::buildMaps(src_size, K, R, T, xmap, ymap);
}
// Warps the source image with the plane projection (including translation T).
// Builds the remap tables, sizes the destination, and remaps through UMats so
// the OpenCL path can be used end-to-end. Returns the warped image's top-left.
Point PlaneWarperOcl::warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode, OutputArray dst)
{
    UMat xmap_u, ymap_u;
    const Rect dst_roi = buildMaps(src.size(), K, R, T, xmap_u, ymap_u);

    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    UMat dst_u = dst.getUMat();
    remap(src, dst_u, xmap_u, ymap_u, interp_mode, border_mode);

    return dst_roi.tl();
}
/////////////////////////////////////////// SphericalWarperOcl ////////////////////////////////////////
/** Builds per-pixel remap tables (CV_32FC1 xmap/ymap) for spherical projection.
 *
 *  Tries the OpenCL kernel "buildWarpSphericalMaps" first; on any failure it
 *  falls back to the CPU implementation in SphericalWarper.
 *
 *  @param src_size size of the image to be warped
 *  @param K        camera intrinsic matrix
 *  @param R        camera rotation matrix
 *  @param xmap     output x-coordinate map for cv::remap
 *  @param ymap     output y-coordinate map for cv::remap
 *  @return bounding rectangle of the warped image in destination coordinates
 */
Rect SphericalWarperOcl::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
    projector_.setCameraParams(K, R);

    Point dst_tl, dst_br;
    detectResultRoi(src_size, dst_tl, dst_br);

    if (ocl::useOpenCL())
    {
        ocl::Kernel k("buildWarpSphericalMaps", ocl::stitching::warpers_oclsrc);
        if (!k.empty())
        {
            Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
            xmap.create(dsize, CV_32FC1);
            ymap.create(dsize, CV_32FC1);

            // k_rinv wraps projector_.k_rinv without copying; it must stay
            // alive until the (blocking) k.run below completes.
            Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv);
            UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ);

            k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
                   ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, projector_.scale);

            // Explicit casts: brace-initializing size_t from int is a
            // narrowing conversion (ill-formed in C++11 list-initialization).
            size_t globalsize[2] = { (size_t)dsize.width, (size_t)dsize.height };
            if (k.run(2, globalsize, NULL, true))
                return Rect(dst_tl, dst_br);
        }
    }

    // CPU fallback.
    return SphericalWarper::buildMaps(src_size, K, R, xmap, ymap);
}
// Warps the source image with the spherical projection. Builds the remap
// tables, sizes the destination, and remaps through UMats so the OpenCL path
// can be used end-to-end. Returns the warped image's top-left corner.
Point SphericalWarperOcl::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
    UMat xmap_u, ymap_u;
    const Rect dst_roi = buildMaps(src.size(), K, R, xmap_u, ymap_u);

    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    UMat dst_u = dst.getUMat();
    remap(src, dst_u, xmap_u, ymap_u, interp_mode, border_mode);

    return dst_roi.tl();
}
/////////////////////////////////////////// CylindricalWarperOcl ////////////////////////////////////////
Rect CylindricalWarperOcl::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
    // Compute per-pixel backward maps (xmap/ymap) for the cylindrical warp.
    // Tries the OpenCL kernel first; if OpenCL is unavailable, the kernel
    // fails to build, or the launch fails, falls back to the CPU path.
    projector_.setCameraParams(K, R);

    Point dst_tl, dst_br;
    detectResultRoi(src_size, dst_tl, dst_br);

    if (ocl::useOpenCL())
    {
        ocl::Kernel k("buildWarpCylindricalMaps", ocl::stitching::warpers_oclsrc);
        if (!k.empty())
        {
            // Map size covers the ROI inclusively (hence the +1).
            Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
            xmap.create(dsize, CV_32FC1);
            ymap.create(dsize, CV_32FC1);

            // Pass projector_.k_rinv as a 1x9 float row for the kernel.
            Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv);
            UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ);

            // uymap carries the size (WriteOnly); uxmap omits it (WriteOnlyNoSize)
            // since both maps share the same dimensions.
            k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
                   ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, projector_.scale);

            // Explicit casts: int -> size_t is a narrowing conversion, which is
            // ill-formed in C++11+ aggregate (brace) initialization.
            size_t globalsize[2] = { (size_t)dsize.width, (size_t)dsize.height };
            if (k.run(2, globalsize, NULL, true))
                return Rect(dst_tl, dst_br);
        }
    }

    // CPU fallback.
    return CylindricalWarper::buildMaps(src_size, K, R, xmap, ymap);
}
Point CylindricalWarperOcl::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
    // Warp src onto the cylinder: build backward maps, size dst to the result
    // ROI, and remap. Returns the ROI's top-left corner.
    UMat mapx, mapy;
    const Rect roi = buildMaps(src.size(), K, R, mapx, mapy);

    dst.create(roi.height + 1, roi.width + 1, src.type());
    UMat warped = dst.getUMat();
    remap(src, warped, mapx, mapy, interp_mode, border_mode);

    return roi.tl();
}
} // namespace detail
} // namespace cv

View File

@ -48,13 +48,11 @@
namespace cvtest {
namespace ocl {
///////////////////////// WarperTestBase ///////////////////////////
struct WarperTestBase :
public Test, public TestUtils
{
Mat src, dst, xmap, ymap;
Mat udst, uxmap, uymap;
UMat usrc, udst, uxmap, uymap;
Mat K, R;
virtual void generateTestData()
@ -62,6 +60,7 @@ struct WarperTestBase :
Size size = randomSize(1, MAX_VALUE);
src = randomMat(size, CV_32FC1, -500, 500);
src.copyTo(usrc);
K = Mat::eye(3, 3, CV_32FC1);
float angle = (float)(30.0 * CV_PI / 180.0);
@ -81,70 +80,64 @@ struct WarperTestBase :
}
};
//////////////////////////////// SphericalWarperOcl /////////////////////////////////////////////////
typedef WarperTestBase SphericalWarperTest;
typedef WarperTestBase SphericalWarperOclTest;
OCL_TEST_F(SphericalWarperOclTest, Mat)
OCL_TEST_F(SphericalWarperTest, Mat)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
Ptr<WarperCreator> creator = makePtr<SphericalWarperOcl>();
Ptr<WarperCreator> creator = makePtr<SphericalWarper>();
Ptr<detail::RotationWarper> warper = creator->create(2.0);
OCL_OFF(warper->buildMaps(src.size(), K, R, xmap, ymap));
OCL_ON(warper->buildMaps(src.size(), K, R, uxmap, uymap));
OCL_ON(warper->buildMaps(usrc.size(), K, R, uxmap, uymap));
OCL_OFF(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, dst));
OCL_ON(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
OCL_ON(warper->warp(usrc, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
Near(1e-4);
}
}
//////////////////////////////// CylindricalWarperOcl /////////////////////////////////////////////////
typedef WarperTestBase CylindricalWarperTest;
typedef WarperTestBase CylindricalWarperOclTest;
OCL_TEST_F(CylindricalWarperOclTest, Mat)
OCL_TEST_F(CylindricalWarperTest, Mat)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
Ptr<WarperCreator> creator = makePtr<CylindricalWarperOcl>();
Ptr<WarperCreator> creator = makePtr<CylindricalWarper>();
Ptr<detail::RotationWarper> warper = creator->create(2.0);
OCL_OFF(warper->buildMaps(src.size(), K, R, xmap, ymap));
OCL_ON(warper->buildMaps(src.size(), K, R, uxmap, uymap));
OCL_ON(warper->buildMaps(usrc.size(), K, R, uxmap, uymap));
OCL_OFF(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, dst));
OCL_ON(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
OCL_ON(warper->warp(usrc, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
Near(1e-4);
}
}
//////////////////////////////// PlaneWarperOcl /////////////////////////////////////////////////
typedef WarperTestBase PlaneWarperTest;
typedef WarperTestBase PlaneWarperOclTest;
OCL_TEST_F(PlaneWarperOclTest, Mat)
OCL_TEST_F(PlaneWarperTest, Mat)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
Ptr<WarperCreator> creator = makePtr<PlaneWarperOcl>();
Ptr<WarperCreator> creator = makePtr<PlaneWarper>();
Ptr<detail::RotationWarper> warper = creator->create(2.0);
OCL_OFF(warper->buildMaps(src.size(), K, R, xmap, ymap));
OCL_ON(warper->buildMaps(src.size(), K, R, uxmap, uymap));
OCL_ON(warper->buildMaps(usrc.size(), K, R, uxmap, uymap));
OCL_OFF(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, dst));
OCL_ON(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
OCL_ON(warper->warp(usrc, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
Near(1e-4);
}

View File

@ -73,6 +73,6 @@ TEST(MultiBandBlender, CanBlendTwoImages)
Mat result; result_s.convertTo(result, CV_8U);
Mat expected = imread(string(cvtest::TS::ptr()->get_data_path()) + "stitching/baboon_lena.png");
double rmsErr = cvtest::norm(expected, result, NORM_L2) / sqrt(double(expected.size().area()));
ASSERT_LT(rmsErr, 1e-3);
double psnr = cvtest::PSNR(expected, result);
EXPECT_GE(psnr, 50);
}

View File

@ -57,6 +57,31 @@ namespace ocl {
using namespace cv;
using namespace testing;
inline std::vector<UMat> ToUMat(const std::vector<Mat>& src)
{
    // Deep-copy each Mat into a corresponding UMat (element-wise upload).
    std::vector<UMat> result(src.size());
    for (size_t i = 0; i < src.size(); ++i)
        src[i].copyTo(result[i]);
    return result;
}
inline UMat ToUMat(const Mat& src)
{
    // Deep-copy a single Mat into a UMat.
    UMat result;
    src.copyTo(result);
    return result;
}
inline UMat ToUMat(InputArray src)
{
    // Materialize the input as a Mat, then deep-copy it into a UMat.
    UMat result;
    src.getMat().copyTo(result);
    return result;
}
extern int test_loop_times;
#define MAX_VALUE 357

View File

@ -74,9 +74,6 @@ static void printUsage()
" --try_cuda (yes|no)\n"
" Try to use CUDA. The default value is 'no'. All default values\n"
" are for CPU mode.\n"
" --try_ocl (yes|no)\n"
" Try to use OpenCL. The default value is 'no'. All default values\n"
" are for CPU mode.\n"
"\nMotion Estimation Flags:\n"
" --work_megapix <float>\n"
" Resolution for image registration step. The default is 0.6 Mpx.\n"
@ -127,7 +124,6 @@ static void printUsage()
vector<String> img_names;
bool preview = false;
bool try_cuda = false;
bool try_ocl = false;
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
@ -178,19 +174,6 @@ static int parseCmdArgs(int argc, char** argv)
}
i++;
}
else if (string(argv[i]) == "--try_ocl")
{
if (string(argv[i + 1]) == "no")
try_ocl = false;
else if (string(argv[i + 1]) == "yes")
try_ocl = true;
else
{
cout << "Bad --try_ocl flag value\n";
return -1;
}
i++;
}
else if (string(argv[i]) == "--work_megapix")
{
work_megapix = atof(argv[i + 1]);
@ -348,7 +331,9 @@ int main(int argc, char* argv[])
int64 app_start_time = getTickCount();
#endif
#if 0
cv::setBreakOnError(true);
#endif
int retval = parseCmdArgs(argc, argv);
if (retval)
@ -554,10 +539,10 @@ int main(int argc, char* argv[])
#endif
vector<Point> corners(num_images);
vector<Mat> masks_warped(num_images);
vector<Mat> images_warped(num_images);
vector<UMat> masks_warped(num_images);
vector<UMat> images_warped(num_images);
vector<Size> sizes(num_images);
vector<Mat> masks(num_images);
vector<UMat> masks(num_images);
// Preapre images masks
for (int i = 0; i < num_images; ++i)
@ -569,17 +554,8 @@ int main(int argc, char* argv[])
// Warp images and their masks
Ptr<WarperCreator> warper_creator;
if (try_ocl)
{
if (warp_type == "plane")
warper_creator = makePtr<cv::PlaneWarperOcl>();
else if (warp_type == "cylindrical")
warper_creator = makePtr<cv::CylindricalWarperOcl>();
else if (warp_type == "spherical")
warper_creator = makePtr<cv::SphericalWarperOcl>();
}
#ifdef HAVE_OPENCV_CUDAWARPING
else if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
{
if (warp_type == "plane")
warper_creator = makePtr<cv::PlaneWarperGpu>();
@ -645,7 +621,7 @@ int main(int argc, char* argv[])
warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
}
vector<Mat> images_warped_f(num_images);
vector<UMat> images_warped_f(num_images);
for (int i = 0; i < num_images; ++i)
images_warped[i].convertTo(images_warped_f[i], CV_32F);