Merge pull request #938 from pengx17:2.4_surf_sample

Vadim Pisarevsky 2013-06-04 13:33:48 +04:00 committed by OpenCV Buildbot
commit 267eb99a7d


@@ -46,150 +46,95 @@
#include <iostream>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/nonfree/ocl.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace std;
using namespace cv;
using namespace cv::ocl;
//#define USE_CPU_DESCRIPTOR // use cpu descriptor extractor until ocl descriptor extractor is fixed
//#define USE_CPU_BFMATCHER
const int LOOP_NUM = 10;
const int GOOD_PTS_MAX = 50;
const float GOOD_PORTION = 0.15f;
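// LOOP_NUM is the number of timed benchmark iterations; GOOD_PTS_MAX and GOOD_PORTION
// cap how many of the best-ranked matches are kept for homography estimation in drawGoodMatches().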
namespace
{
void help();
void help()
{
cout << "\nThis program demonstrates using SURF_OCL features detector and descriptor extractor" << endl;
cout << "\nUsage:\n\tsurf_matcher --left <image1> --right <image2>" << endl;
std::cout << "\nThis program demonstrates using SURF_OCL features detector and descriptor extractor" << std::endl;
std::cout << "\nUsage:\n\tsurf_matcher --left <image1> --right <image2> [-c]" << std::endl;
std::cout << "\nExample:\n\tsurf_matcher --left box.png --right box_in_scene.png" << std::endl;
}
int64 work_begin = 0;
int64 work_end = 0;
void workBegin()
{
work_begin = getTickCount();
}
void workEnd()
{
work_end = getTickCount() - work_begin;
}
double getTime()
{
return work_end /((double)cvGetTickFrequency() * 1000.);
}
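// Timing helpers: workBegin()/workEnd() bracket the benchmark loop and getTime()
// converts the elapsed tick count to milliseconds.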
template<class KPDetector>
struct SURFDetector
{
KPDetector surf;
SURFDetector(double hessian = 800.0)
:surf(hessian)
{
}
template<class T>
void operator()(const T& in, const T& mask, vector<cv::KeyPoint>& pts, T& descriptors, bool useProvided = false)
{
surf(in, mask, pts, descriptors, useProvided);
}
};
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
template<class KPMatcher>
struct SURFMatcher
{
KPMatcher matcher;
template<class T>
void match(const T& in1, const T& in2, vector<cv::DMatch>& matches)
{
matcher.match(in1, in2, matches);
}
};
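// These thin wrappers give the CPU (SURF, BFMatcher) and OpenCL (SURF_OCL, BFMatcher_OCL)
// implementations a common call syntax, so the benchmark branches in main() stay identical
// apart from the instantiated types.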
//-- Draw the best GOOD_PORTION of the matches (at most GOOD_PTS_MAX) together with the detected object outline
Mat drawGoodMatches(
const Mat& cpu_img1,
const Mat& cpu_img2,
const vector<KeyPoint>& keypoints1,
const vector<KeyPoint>& keypoints2,
vector<DMatch>& matches,
vector<Point2f>& scene_corners_
)
{
//-- Sort matches and preserve top 10% matches
std::sort(matches.begin(), matches.end());
std::vector< DMatch > good_matches;
double minDist = matches.front().distance,
maxDist = matches.back().distance;
const int ptsPairs = std::min(GOOD_PTS_MAX, (int)(matches.size() * GOOD_PORTION));
for( int i = 0; i < ptsPairs; i++ )
{
good_matches.push_back( matches[i] );
}
std::cout << "\nMax distance: " << maxDist << std::endl;
std::cout << "Min distance: " << minDist << std::endl;
std::cout << "Calculating homography using " << ptsPairs << " point pairs." << std::endl;
// drawing the results
Mat img_matches;
@@ -207,26 +152,238 @@ int main(int argc, char* argv[])
obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
}
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( cpu_img1.cols, 0 );
obj_corners[2] = cvPoint( cpu_img1.cols, cpu_img1.rows ); obj_corners[3] = cvPoint( 0, cpu_img1.rows );
std::vector<Point2f> scene_corners(4);
Mat H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
scene_corners_ = scene_corners;
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches,
scene_corners[0] + Point2f( (float)cpu_img1.cols, 0), scene_corners[1] + Point2f( (float)cpu_img1.cols, 0),
Scalar( 0, 255, 0), 2, CV_AA );
line( img_matches,
scene_corners[1] + Point2f( (float)cpu_img1.cols, 0), scene_corners[2] + Point2f( (float)cpu_img1.cols, 0),
Scalar( 0, 255, 0), 2, CV_AA );
line( img_matches,
scene_corners[2] + Point2f( (float)cpu_img1.cols, 0), scene_corners[3] + Point2f( (float)cpu_img1.cols, 0),
Scalar( 0, 255, 0), 2, CV_AA );
line( img_matches,
scene_corners[3] + Point2f( (float)cpu_img1.cols, 0), scene_corners[0] + Point2f( (float)cpu_img1.cols, 0),
Scalar( 0, 255, 0), 2, CV_AA );
return img_matches;
}
}
////////////////////////////////////////////////////
// This program demonstrates the usage of SURF_OCL.
// The CPU findHomography() interface is used to compute the transformation matrix.
int main(int argc, char* argv[])
{
vector<cv::ocl::Info> info;
if(cv::ocl::getDevice(info) == 0)
{
std::cout << "Error: Did not find a valid OpenCL device!" << std::endl;
return -1;
}
ocl::setDevice(info[0]);
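// Bind subsequent ocl:: calls to the first device of the first OpenCL platform found.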
Mat cpu_img1, cpu_img2, cpu_img1_grey, cpu_img2_grey;
oclMat img1, img2;
bool useCPU = false;
bool useGPU = false;
bool useALL = false;
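// Mode selection: -c runs the CPU path only, -g the OpenCL path only, -a (or no flag)
// runs both and cross-checks the results.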
for (int i = 1; i < argc; ++i)
{
if (string(argv[i]) == "--left")
{
cpu_img1 = imread(argv[++i]);
CV_Assert(!cpu_img1.empty());
cvtColor(cpu_img1, cpu_img1_grey, CV_BGR2GRAY);
img1 = cpu_img1_grey;
}
else if (string(argv[i]) == "--right")
{
cpu_img2 = imread(argv[++i]);
CV_Assert(!cpu_img2.empty());
cvtColor(cpu_img2, cpu_img2_grey, CV_BGR2GRAY);
img2 = cpu_img2_grey;
}
else if (string(argv[i]) == "-c")
{
useCPU = true;
useGPU = false;
useALL = false;
}else if(string(argv[i]) == "-g")
{
useGPU = true;
useCPU = false;
useALL = false;
}else if(string(argv[i]) == "-a")
{
useALL = true;
useCPU = false;
useGPU = false;
}
else if (string(argv[i]) == "--help")
{
help();
return -1;
}
}
if(!useCPU)
{
std::cout
<< "Device name:"
<< info[0].DeviceName[0]
<< std::endl;
}
double surf_time = 0.;
//declare input/output
vector<KeyPoint> keypoints1, keypoints2;
vector<DMatch> matches;
vector<KeyPoint> gpu_keypoints1;
vector<KeyPoint> gpu_keypoints2;
vector<DMatch> gpu_matches;
Mat descriptors1CPU, descriptors2CPU;
oclMat keypoints1GPU, keypoints2GPU;
oclMat descriptors1GPU, descriptors2GPU;
//instantiate detectors/matchers
SURFDetector<SURF> cpp_surf;
SURFDetector<SURF_OCL> ocl_surf;
SURFMatcher<BFMatcher> cpp_matcher;
SURFMatcher<BFMatcher_OCL> ocl_matcher;
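// Both detectors use SURFDetector's default Hessian threshold of 800.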
//-- start of timing section
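// Each branch runs LOOP_NUM + 1 iterations: iteration 0 is an untimed warm-up
// (absorbing one-time costs such as OpenCL kernel compilation), timing starts once
// i == 1, and the reported time is averaged over the remaining LOOP_NUM iterations.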
if (useCPU)
{
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
cpp_surf(cpu_img1_grey, Mat(), keypoints1, descriptors1CPU);
cpp_surf(cpu_img2_grey, Mat(), keypoints2, descriptors2CPU);
cpp_matcher.match(descriptors1CPU, descriptors2CPU, matches);
}
workEnd();
std::cout << "CPP: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "CPP: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
}
else if(useGPU)
{
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
ocl_surf(img1, oclMat(), keypoints1, descriptors1GPU);
ocl_surf(img2, oclMat(), keypoints2, descriptors2GPU);
ocl_matcher.match(descriptors1GPU, descriptors2GPU, matches);
}
workEnd();
std::cout << "OCL: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "OCL: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
}else
{
//cpu runs
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
cpp_surf(cpu_img1_grey, Mat(), keypoints1, descriptors1CPU);
cpp_surf(cpu_img2_grey, Mat(), keypoints2, descriptors2CPU);
cpp_matcher.match(descriptors1CPU, descriptors2CPU, matches);
}
workEnd();
std::cout << "\nCPP: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "CPP: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "(CPP)SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl;
//gpu runs
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
ocl_surf(img1, oclMat(), gpu_keypoints1, descriptors1GPU);
ocl_surf(img2, oclMat(), gpu_keypoints2, descriptors2GPU);
ocl_matcher.match(descriptors1GPU, descriptors2GPU, gpu_matches);
}
workEnd();
std::cout << "\nOCL: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "OCL: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "(OCL)SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
}
//--------------------------------------------------------------------------
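// In the default / -a mode the CPU and OpenCL results are compared via the four projected
// object corners; the check passes only if every corner coordinate agrees to within 10 pixels.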
std::vector<Point2f> cpu_corner;
Mat img_matches = drawGoodMatches(cpu_img1, cpu_img2, keypoints1, keypoints2, matches, cpu_corner);
std::vector<Point2f> gpu_corner;
Mat ocl_img_matches;
if(useALL || (!useCPU&&!useGPU))
{
ocl_img_matches = drawGoodMatches(cpu_img1, cpu_img2, gpu_keypoints1, gpu_keypoints2, gpu_matches, gpu_corner);
//check accuracy
std::cout<<"\nCheck accuracy:\n";
if(cpu_corner.size()!=gpu_corner.size())
std::cout<<"Failed\n";
else
{
bool result = false;
for(size_t i = 0; i < cpu_corner.size(); i++)
{
if((std::abs(cpu_corner[i].x - gpu_corner[i].x) > 10)
||(std::abs(cpu_corner[i].y - gpu_corner[i].y) > 10))
{
std::cout<<"Failed\n";
result = false;
break;
}
result = true;
}
if(result)
std::cout<<"Passed\n";
}
}
//-- Show detected matches
if (useCPU)
{
namedWindow("cpu surf matches", 0);
imshow("cpu surf matches", img_matches);
}
else if(useGPU)
{
namedWindow("ocl surf matches", 0);
imshow("ocl surf matches", img_matches);
}else
{
namedWindow("cpu surf matches", 0);
imshow("cpu surf matches", img_matches);
namedWindow("ocl surf matches", 0);
imshow("ocl surf matches", ocl_img_matches);
}
waitKey(0);
return 0;
}
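// Example invocations (using the name from the help text; the built binary may carry a
// prefix depending on how the OpenCV samples are configured):
//   surf_matcher --left box.png --right box_in_scene.png        // run both paths and compare
//   surf_matcher --left box.png --right box_in_scene.png -c     // CPU only
//   surf_matcher --left box.png --right box_in_scene.png -g     // OpenCL only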