Merge pull request #4063 from Dikay900:2_4_to_master

Vadim Pisarevsky 2015-05-28 14:49:13 +00:00
commit f7ad192842
7 changed files with 267 additions and 18 deletions

View File

@@ -93,25 +93,15 @@ else()
   set(HAVE_CSTRIPES 0)
 endif()
 
-# --- OpenMP ---
-if(WITH_OPENMP AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES)
-  find_package(OpenMP)
-  if(OPENMP_FOUND)
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
-  endif()
-  set(HAVE_OPENMP "${OPENMP_FOUND}")
-endif()
-
 # --- GCD ---
-if(APPLE AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES AND NOT HAVE_OPENMP)
+if(APPLE AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES)
   set(HAVE_GCD 1)
 else()
   set(HAVE_GCD 0)
 endif()
 
 # --- Concurrency ---
-if(MSVC AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES AND NOT HAVE_OPENMP)
+if(MSVC AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES)
   set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/concurrencytest.cpp")
   file(WRITE "${_fname}" "#if _MSC_VER < 1600\n#error\n#endif\nint main() { return 0; }\n")
   try_compile(HAVE_CONCURRENCY "${CMAKE_BINARY_DIR}" "${_fname}")
@@ -119,3 +109,13 @@ if(MSVC AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES AND NOT HAVE_OPENMP)
 else()
   set(HAVE_CONCURRENCY 0)
 endif()
+
+# --- OpenMP ---
+if(WITH_OPENMP)
+  find_package(OpenMP)
+  if(OPENMP_FOUND)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
+  endif()
+  set(HAVE_OPENMP "${OPENMP_FOUND}")
+endif()
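
With this reordering, OpenMP is probed on its own and no longer suppresses GCD on Apple or the Concurrency runtime on MSVC; those backends are now skipped only when TBB or C= is found. To confirm which parallel framework a given build actually picked up, one can query the library at runtime; a minimal sketch using only standard OpenCV calls:

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        // The configure-time choice appears under "Parallel framework:" in the build info.
        std::cout << cv::getBuildInformation() << std::endl;
        // Thread count the selected backend will use by default.
        std::cout << "Threads: " << cv::getNumThreads() << std::endl;
        return 0;
    }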

View File

@@ -46,6 +46,7 @@
 #include <cstdio>
 #include <iterator>
+#include <limits>
 
 /****************************************************************************************\
       The code below is implementation of HOG (Histogram-of-Oriented Gradients)
@@ -3533,7 +3534,7 @@ void HOGDescriptor::groupRectangles(std::vector<cv::Rect>& rectList, std::vector
     std::vector<cv::Rect_<double> > rrects(nclasses);
     std::vector<int> numInClass(nclasses, 0);
-    std::vector<double> foundWeights(nclasses, DBL_MIN);
+    std::vector<double> foundWeights(nclasses, -std::numeric_limits<double>::max());
     int i, j, nlabels = (int)labels.size();
 
     for( i = 0; i < nlabels; i++ )
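
The change of initializer is the substance of this hunk: foundWeights accumulates a per-class maximum of detection weights, which may be negative, and DBL_MIN is the smallest positive double rather than the most negative one, so a negative weight could never replace it. A short illustration of the difference:

    #include <cfloat>
    #include <limits>
    #include <iostream>

    int main()
    {
        std::cout << DBL_MIN << std::endl;                              // ~2.2e-308, still greater than zero
        std::cout << -std::numeric_limits<double>::max() << std::endl;  // ~-1.8e+308, a usable lower bound
        std::cout << (-0.5 > DBL_MIN) << std::endl;                     // prints 0: a negative weight never wins
        return 0;
    }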

View File

@@ -250,7 +250,7 @@ namespace
 
 Ptr<FrameSource> cv::superres::createFrameSource_Video_CUDA(const String& fileName)
 {
-    return makePtr<VideoFrameSource>(fileName);
+    return makePtr<VideoFrameSource_CUDA>(fileName);
 }
 
 #endif // HAVE_OPENCV_CUDACODEC
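
The old line was a copy-paste of the CPU factory, so the CUDA factory silently returned a CPU-backed frame source. A rough sketch of the call path the fix restores (the file name and the BTVL1 super-resolution choice are illustrative, not part of this commit):

    #include <opencv2/superres.hpp>

    using namespace cv;
    using namespace cv::superres;

    int main()
    {
        // With the fix, this factory returns the cudacodec-backed reader.
        Ptr<FrameSource> source = createFrameSource_Video_CUDA("input.avi"); // hypothetical input
        Ptr<SuperResolution> sr = createSuperResolution_BTVL1_CUDA();
        sr->setInput(source);

        Mat upscaled;
        sr->nextFrame(upscaled); // frames now come through the CUDA decoder
        return 0;
    }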

View File

@@ -183,6 +183,7 @@ enum
     CV_CAP_PROP_ROLL =35,
     CV_CAP_PROP_IRIS =36,
     CV_CAP_PROP_SETTINGS =37,
+    CV_CAP_PROP_BUFFERSIZE =38,
 
     CV_CAP_PROP_AUTOGRAB =1024, // property for videoio class CvCapture_Android only
     CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns const char* indeed

View File

@@ -278,6 +278,7 @@ CvCaptureCAM_DC1394_v2_CPP::CvCaptureCAM_DC1394_v2_CPP()
     dcCam = 0;
     isoSpeed = 400;
     fps = 15;
+    // The default of 8 DMA buffers can be reduced to 1 via CV_CAP_PROP_BUFFERSIZE to ensure only a single frame is stored in the buffer.
     nDMABufs = 8;
     started = false;
     cameraId = 0;
@@ -688,6 +689,8 @@ double CvCaptureCAM_DC1394_v2_CPP::getProperty(int propId) const
         break;
     case CV_CAP_PROP_ISO_SPEED:
         return (double) isoSpeed;
+    case CV_CAP_PROP_BUFFERSIZE:
+        return (double) nDMABufs;
     default:
         if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
             && dcCam)
@@ -735,6 +738,11 @@ bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
             return false;
         isoSpeed = cvRound(value);
         break;
+    case CV_CAP_PROP_BUFFERSIZE:
+        if(started)
+            return false;
+        nDMABufs = cvRound(value);
+        break;
     //The code below is based on coriander, callbacks.c:795, refer to case RANGE_MENU_MAN :
     default:
         if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
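
Together with the CV_CAP_PROP_BUFFERSIZE constant added above, this exposes the DMA ring size, and the started check means the property is only writable before the first grab, since the DMA capture has already been set up by then. A minimal sketch from the user's side (the camera index is illustrative):

    #include <opencv2/videoio.hpp>
    #include <opencv2/videoio/videoio_c.h>

    int main()
    {
        cv::VideoCapture cap(0); // hypothetical index of a dc1394 camera

        // Must be set before the first frame is grabbed; afterwards set() returns false.
        cap.set(CV_CAP_PROP_BUFFERSIZE, 1);

        cv::Mat frame;
        cap >> frame;
        return 0;
    }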

View File

@@ -277,7 +277,7 @@ double CvCaptureCAM_XIMEA::getProperty( int property_id ) const
     case CV_CAP_PROP_XI_AUTO_WB : xiGetParamInt( hmv, XI_PRM_AUTO_WB, &ival); return ival;
     case CV_CAP_PROP_XI_AEAG : xiGetParamInt( hmv, XI_PRM_AEAG, &ival); return ival;
     case CV_CAP_PROP_XI_EXP_PRIORITY : xiGetParamFloat( hmv, XI_PRM_EXP_PRIORITY, &fval); return fval;
-    case CV_CAP_PROP_XI_AE_MAX_LIMIT : xiGetParamInt( hmv, XI_PRM_EXP_PRIORITY, &ival); return ival;
+    case CV_CAP_PROP_XI_AE_MAX_LIMIT : xiGetParamInt( hmv, XI_PRM_AE_MAX_LIMIT, &ival); return ival;
     case CV_CAP_PROP_XI_AG_MAX_LIMIT : xiGetParamFloat( hmv, XI_PRM_AG_MAX_LIMIT, &fval); return fval;
     case CV_CAP_PROP_XI_AEAG_LEVEL : xiGetParamInt( hmv, XI_PRM_AEAG_LEVEL, &ival); return ival;
     case CV_CAP_PROP_XI_TIMEOUT : return timeout;
@@ -310,7 +310,7 @@ bool CvCaptureCAM_XIMEA::setProperty( int property_id, double value )
     case CV_CAP_PROP_XI_OFFSET_Y : mvret = xiSetParamInt( hmv, XI_PRM_OFFSET_Y, ival); break;
     case CV_CAP_PROP_XI_TRG_SOURCE : mvret = xiSetParamInt( hmv, XI_PRM_TRG_SOURCE, ival); break;
     case CV_CAP_PROP_XI_GPI_SELECTOR : mvret = xiSetParamInt( hmv, XI_PRM_GPI_SELECTOR, ival); break;
-    case CV_CAP_PROP_XI_TRG_SOFTWARE : mvret = xiSetParamInt( hmv, XI_PRM_TRG_SOURCE, 1); break;
+    case CV_CAP_PROP_XI_TRG_SOFTWARE : mvret = xiSetParamInt( hmv, XI_PRM_TRG_SOFTWARE, 1); break;
     case CV_CAP_PROP_XI_GPI_MODE : mvret = xiSetParamInt( hmv, XI_PRM_GPI_MODE, ival); break;
     case CV_CAP_PROP_XI_GPI_LEVEL : mvret = xiSetParamInt( hmv, XI_PRM_GPI_LEVEL, ival); break;
     case CV_CAP_PROP_XI_GPO_SELECTOR : mvret = xiSetParamInt( hmv, XI_PRM_GPO_SELECTOR, ival); break;
@@ -318,10 +318,10 @@ bool CvCaptureCAM_XIMEA::setProperty( int property_id, double value )
     case CV_CAP_PROP_XI_LED_SELECTOR : mvret = xiSetParamInt( hmv, XI_PRM_LED_SELECTOR, ival); break;
     case CV_CAP_PROP_XI_LED_MODE : mvret = xiSetParamInt( hmv, XI_PRM_LED_MODE, ival); break;
     case CV_CAP_PROP_XI_AUTO_WB : mvret = xiSetParamInt( hmv, XI_PRM_AUTO_WB, ival); break;
-    case CV_CAP_PROP_XI_MANUAL_WB : mvret = xiSetParamInt( hmv, XI_PRM_LED_MODE, ival); break;
+    case CV_CAP_PROP_XI_MANUAL_WB : mvret = xiSetParamInt( hmv, XI_PRM_MANUAL_WB, ival); break;
     case CV_CAP_PROP_XI_AEAG : mvret = xiSetParamInt( hmv, XI_PRM_AEAG, ival); break;
     case CV_CAP_PROP_XI_EXP_PRIORITY : mvret = xiSetParamFloat( hmv, XI_PRM_EXP_PRIORITY, fval); break;
-    case CV_CAP_PROP_XI_AE_MAX_LIMIT : mvret = xiSetParamInt( hmv, XI_PRM_EXP_PRIORITY, ival); break;
+    case CV_CAP_PROP_XI_AE_MAX_LIMIT : mvret = xiSetParamInt( hmv, XI_PRM_AE_MAX_LIMIT, ival); break;
     case CV_CAP_PROP_XI_AG_MAX_LIMIT : mvret = xiSetParamFloat( hmv, XI_PRM_AG_MAX_LIMIT, fval); break;
     case CV_CAP_PROP_XI_AEAG_LEVEL : mvret = xiSetParamInt( hmv, XI_PRM_AEAG_LEVEL, ival); break;
     case CV_CAP_PROP_XI_TIMEOUT : timeout = ival; break;
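
All three XIMEA fixes are copy-paste corrections: CV_CAP_PROP_XI_AE_MAX_LIMIT previously read and wrote XI_PRM_EXP_PRIORITY, the software trigger re-set XI_PRM_TRG_SOURCE instead of firing XI_PRM_TRG_SOFTWARE, and manual white balance wrote XI_PRM_LED_MODE. A sketch of the calls that now behave as named (parameter values are illustrative):

    #include <opencv2/videoio.hpp>
    #include <opencv2/videoio/videoio_c.h>

    int main()
    {
        cv::VideoCapture cap(CV_CAP_XIAPI); // first XIMEA device

        cap.set(CV_CAP_PROP_XI_AE_MAX_LIMIT, 100000); // caps auto-exposure time, no longer exposure priority
        cap.set(CV_CAP_PROP_XI_MANUAL_WB, 1);         // applies manual white balance, not the LED mode
        cap.set(CV_CAP_PROP_XI_TRG_SOFTWARE, 1);      // fires a software trigger

        return 0;
    }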

View File

@@ -0,0 +1,239 @@
/*
* Author: Samyak Datta (datta[dot]samyak[at]gmail.com)
*
* A program to detect facial feature points using
* Haarcascade classifiers for face, eyes, nose and mouth
*
*/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <cstdio>
#include <vector>
#include <algorithm>
using namespace std;
using namespace cv;
// Functions to parse command-line arguments
static string getCommandOption(const vector<string>&, const string&);
static void setCommandOptions(vector<string>&, int, char**);
static bool doesCmdOptionExist(const vector<string>& , const string&);
// Functions for facial feature detection
static void help();
static void detectFaces(Mat&, vector<Rect_<int> >&, string);
static void detectEyes(Mat&, vector<Rect_<int> >&, string);
static void detectNose(Mat&, vector<Rect_<int> >&, string);
static void detectMouth(Mat&, vector<Rect_<int> >&, string);
static void detectFacialFeatures(Mat&, const vector<Rect_<int> >, string, string, string);
string input_image_path;
string face_cascade_path, eye_cascade_path, nose_cascade_path, mouth_cascade_path;
int main(int argc, char** argv)
{
    // Check that the required positional arguments were provided
    if(argc < 3)
    {
        help();
        return 1;
    }

    // Extract command-line options
    vector<string> args;
    setCommandOptions(args, argc, argv);

    input_image_path = argv[1];
    face_cascade_path = argv[2];
    eye_cascade_path = (doesCmdOptionExist(args, "-eyes")) ? getCommandOption(args, "-eyes") : "";
    nose_cascade_path = (doesCmdOptionExist(args, "-nose")) ? getCommandOption(args, "-nose") : "";
    mouth_cascade_path = (doesCmdOptionExist(args, "-mouth")) ? getCommandOption(args, "-mouth") : "";

    // Load the input image and verify that it was read successfully
    Mat image = imread(input_image_path);
    if(image.empty())
    {
        cout << "Could not read input image: " << input_image_path << "\n";
        return 1;
    }

    // Detect faces and facial features
    vector<Rect_<int> > faces;
    detectFaces(image, faces, face_cascade_path);
    detectFacialFeatures(image, faces, eye_cascade_path, nose_cascade_path, mouth_cascade_path);

    imshow("Result", image);
    waitKey(0);
    return 0;
}
void setCommandOptions(vector<string>& args, int argc, char** argv)
{
    for(int i = 1; i < argc; ++i)
    {
        args.push_back(argv[i]);
    }
    return;
}

string getCommandOption(const vector<string>& args, const string& opt)
{
    string answer;
    vector<string>::const_iterator it = find(args.begin(), args.end(), opt);
    if(it != args.end() && (++it != args.end()))
        answer = *it;
    return answer;
}

bool doesCmdOptionExist(const vector<string>& args, const string& opt)
{
    vector<string>::const_iterator it = find(args.begin(), args.end(), opt);
    return (it != args.end());
}
static void help()
{
    cout << "\nThis file demonstrates facial feature points detection using Haarcascade classifiers.\n"
            "The program detects a face, and then eyes, nose and mouth inside the face. "
            "The code has been tested on the Japanese Female Facial Expression (JAFFE) database and found "
            "to give reasonably accurate results.\n";

    cout << "\nUSAGE: ./cpp-example-facial_features [IMAGE] [FACE_CASCADE] [OPTIONS]\n"
            "IMAGE\n\tPath to the image of a face taken as input.\n"
            "FACE_CASCADE\n\tPath to a haarcascade classifier for face detection.\n"
            "OPTIONS: \nThere are 3 options available which are described in detail. There must be a "
            "space between the option and its argument (all three options accept arguments).\n"
            "\t-eyes : Specify the haarcascade classifier for eye detection.\n"
            "\t-nose : Specify the haarcascade classifier for nose detection.\n"
            "\t-mouth : Specify the haarcascade classifier for mouth detection.\n";

    cout << "EXAMPLE:\n"
            "(1) ./cpp-example-facial_features image.jpg face.xml -eyes eyes.xml -mouth mouth.xml\n"
            "\tThis will detect the face, eyes and mouth in image.jpg.\n"
            "(2) ./cpp-example-facial_features image.jpg face.xml -nose nose.xml\n"
            "\tThis will detect the face and nose in image.jpg.\n"
            "(3) ./cpp-example-facial_features image.jpg face.xml\n"
            "\tThis will detect only the face in image.jpg.\n";

    cout << " \n\nThe classifiers for face and eyes can be downloaded from : "
            " \nhttps://github.com/Itseez/opencv/tree/master/data/haarcascades";

    cout << "\n\nThe classifiers for nose and mouth can be downloaded from : "
            " \nhttps://github.com/Itseez/opencv_contrib/tree/master/modules/face/data/cascades\n";
}
static void detectFaces(Mat& img, vector<Rect_<int> >& faces, string cascade_path)
{
    CascadeClassifier face_cascade;
    face_cascade.load(cascade_path);

    face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
    return;
}
static void detectFacialFeatures(Mat& img, const vector<Rect_<int> > faces, string eye_cascade,
                                 string nose_cascade, string mouth_cascade)
{
    for(unsigned int i = 0; i < faces.size(); ++i)
    {
        // Mark the bounding box enclosing the face
        Rect face = faces[i];
        rectangle(img, Point(face.x, face.y), Point(face.x+face.width, face.y+face.height),
                  Scalar(255, 0, 0), 1, 4);

        // Eyes, nose and mouth will be detected inside the face (region of interest)
        Mat ROI = img(Rect(face.x, face.y, face.width, face.height));

        // Check if all features (eyes, nose and mouth) are being detected
        bool is_full_detection = false;
        if( (!eye_cascade.empty()) && (!nose_cascade.empty()) && (!mouth_cascade.empty()) )
            is_full_detection = true;

        // Detect eyes if a classifier was provided by the user
        if(!eye_cascade.empty())
        {
            vector<Rect_<int> > eyes;
            detectEyes(ROI, eyes, eye_cascade);

            // Mark points corresponding to the centre of the eyes
            for(unsigned int j = 0; j < eyes.size(); ++j)
            {
                Rect e = eyes[j];
                circle(ROI, Point(e.x+e.width/2, e.y+e.height/2), 3, Scalar(0, 255, 0), -1, 8);
                /* rectangle(ROI, Point(e.x, e.y), Point(e.x+e.width, e.y+e.height),
                   Scalar(0, 255, 0), 1, 4); */
            }
        }

        // Detect the nose if a classifier was provided by the user
        double nose_center_height = 0.0;
        if(!nose_cascade.empty())
        {
            vector<Rect_<int> > nose;
            detectNose(ROI, nose, nose_cascade);

            // Mark points corresponding to the centre (tip) of the nose
            for(unsigned int j = 0; j < nose.size(); ++j)
            {
                Rect n = nose[j];
                circle(ROI, Point(n.x+n.width/2, n.y+n.height/2), 3, Scalar(0, 255, 0), -1, 8);
                nose_center_height = (n.y + n.height/2);
            }
        }

        // Detect the mouth if a classifier was provided by the user
        double mouth_center_height = 0.0;
        if(!mouth_cascade.empty())
        {
            vector<Rect_<int> > mouth;
            detectMouth(ROI, mouth, mouth_cascade);

            for(unsigned int j = 0; j < mouth.size(); ++j)
            {
                Rect m = mouth[j];
                mouth_center_height = (m.y + m.height/2);

                // The mouth should lie below the nose
                if( (is_full_detection) && (mouth_center_height > nose_center_height) )
                {
                    rectangle(ROI, Point(m.x, m.y), Point(m.x+m.width, m.y+m.height), Scalar(0, 255, 0), 1, 4);
                }
                else if( (is_full_detection) && (mouth_center_height <= nose_center_height) )
                    continue;
                else
                    rectangle(ROI, Point(m.x, m.y), Point(m.x+m.width, m.y+m.height), Scalar(0, 255, 0), 1, 4);
            }
        }
    }
    return;
}
static void detectEyes(Mat& img, vector<Rect_<int> >& eyes, string cascade_path)
{
    CascadeClassifier eyes_cascade;
    eyes_cascade.load(cascade_path);

    eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
    return;
}

static void detectNose(Mat& img, vector<Rect_<int> >& nose, string cascade_path)
{
    CascadeClassifier nose_cascade;
    nose_cascade.load(cascade_path);

    nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
    return;
}

static void detectMouth(Mat& img, vector<Rect_<int> >& mouth, string cascade_path)
{
    CascadeClassifier mouth_cascade;
    mouth_cascade.load(cascade_path);

    mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
    return;
}