Mirror of https://github.com/opencv/opencv.git (synced 2025-01-18 22:44:02 +08:00)
Reverted changes breaking Android build
This commit is contained in:
parent a497e1b37e
commit e8d158a5c8
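The diff below reverts DetectionBasedTracker from the detector-injection (IDetector) API back to the original cascade-filename API. For orientation, here is a minimal usage sketch of the restored interface; it is not part of the commit, and it assumes an OpenCV 2.4-era build of the contrib module plus a cascade file path that is only a placeholder:

// Sketch only (not from this commit): exercises the filename-based constructor
// that this revert restores. The cascade path below is a placeholder.
#include <opencv2/contrib/detection_based_tracker.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    DetectionBasedTracker::Parameters params;
    // Defaults restored by this commit: minObjectSize=96, maxObjectSize=INT_MAX,
    // scaleFactor=1.1, maxTrackLifetime=5, minNeighbors=2, minDetectionPeriod=0.
    DetectionBasedTracker tracker("lbpcascade_frontalface.xml", params);

    if (!tracker.run())            // starts the background detection thread
        return 1;

    cv::VideoCapture capture(0);
    cv::Mat frame, gray;
    std::vector<cv::Rect> faces;
    while (capture.read(frame))
    {
        cv::cvtColor(frame, gray, CV_BGR2GRAY);  // process() expects a CV_8UC1 image
        tracker.process(gray);
        tracker.getObjects(faces);
        for (size_t i = 0; i < faces.size(); i++)
            cv::rectangle(frame, faces[i], CV_RGB(0, 255, 0));
        cv::imshow("faces", frame);
        if (cv::waitKey(30) >= 0)
            break;
    }
    tracker.stop();
    return 0;
}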
@@ -12,66 +12,17 @@ class DetectionBasedTracker
public:
struct Parameters
{
int minObjectSize;
int maxObjectSize;
double scaleFactor;
int maxTrackLifetime;
int minNeighbors;
int minDetectionPeriod; //the minimal time between run of the big object detector (on the whole frame) in ms (1000 mean 1 sec), default=0
Parameters();
};
class IDetector
{
public:
IDetector():
minObjSize(96, 96),
maxObjSize(INT_MAX, INT_MAX),
scaleFactor(1.1f),
minNeighbours(2)
{}
virtual void detect(const cv::Mat& Image, std::vector<cv::Rect>& objects) = 0;
void setMinObjectSize(const cv::Size& min)
{
minObjSize = min;
}
void setMaxObjectSize(const cv::Size& max)
{
maxObjSize = max;
}
cv::Size getMinObjectSize() const
{
return minObjSize;
}
cv::Size getMaxObjectSize() const
{
return maxObjSize;
}
float getScaleFactor()
{
return scaleFactor;
}
void setScaleFactor(float value)
{
scaleFactor = value;
}
int getMinNeighbours()
{
return minNeighbours;
}
void setMinNeighbours(int value)
{
minNeighbours = value;
}
virtual ~IDetector() {}
protected:
cv::Size minObjSize;
cv::Size maxObjSize;
int minNeighbours;
float scaleFactor;
};
DetectionBasedTracker(cv::Ptr<IDetector> MainDetector, cv::Ptr<IDetector> TrackingDetector, const Parameters& params);
DetectionBasedTracker(const std::string& cascadeFilename, const Parameters& params);
virtual ~DetectionBasedTracker();
virtual bool run();
@@ -93,6 +44,7 @@ class DetectionBasedTracker
cv::Ptr<SeparateDetectionWork> separateDetectionWork;
friend void* workcycleObjectDetectorFunction(void* p);
struct InnerParameters
{
int numLastPositionsToTrack;
@@ -138,7 +90,8 @@ class DetectionBasedTracker
std::vector<float> weightsPositionsSmoothing;
std::vector<float> weightsSizesSmoothing;
cv::Ptr<IDetector> cascadeForTracking;
cv::CascadeClassifier cascadeForTracking;
void updateTrackedObjects(const std::vector<cv::Rect>& detectedObjects);
cv::Rect calcTrackedObjectPositionToShow(int i) const;
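The header hunks above restore the plain Parameters struct and the CascadeClassifier member, and drop the IDetector interface. As a hedged illustration (not part of the commit), populating the restored fields and applying them through setParameters, whose restored validation appears further down in this diff, might look like the sketch below; the helper name and the chosen values are placeholders:

// Illustrative sketch only (not from the commit). Field names are those of the
// restored Parameters struct above; the values are placeholders.
#include <climits>
#include <opencv2/contrib/detection_based_tracker.hpp>

static bool configureTracker(DetectionBasedTracker& tracker)
{
    DetectionBasedTracker::Parameters params;
    params.minObjectSize      = 64;      // smallest object searched for, in pixels
    params.maxObjectSize      = INT_MAX; // effectively unbounded
    params.scaleFactor        = 1.2;     // the restored check rejects values <= 1.0
    params.maxTrackLifetime   = 10;      // how long a lost object is kept alive
    params.minNeighbors       = 3;
    params.minDetectionPeriod = 1000;    // full-frame detection at most once per second (ms)
    return tracker.setParameters(params);
}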
@@ -52,11 +52,10 @@ static inline cv::Rect scale_rect(const cv::Rect& r, float scale)
};
void* workcycleObjectDetectorFunction(void* p);
class DetectionBasedTracker::SeparateDetectionWork
{
public:
SeparateDetectionWork(DetectionBasedTracker& _detectionBasedTracker, cv::Ptr<DetectionBasedTracker::IDetector> _detector);
SeparateDetectionWork(DetectionBasedTracker& _detectionBasedTracker, const std::string& cascadeFilename);
virtual ~SeparateDetectionWork();
bool communicateWithDetectingThread(const Mat& imageGray, vector<Rect>& rectsWhereRegions);
bool run();
@@ -78,7 +77,7 @@ class DetectionBasedTracker::SeparateDetectionWork
protected:
DetectionBasedTracker& detectionBasedTracker;
cv::Ptr<DetectionBasedTracker::IDetector> cascadeInThread;
cv::CascadeClassifier cascadeInThread;
pthread_t second_workthread;
pthread_mutex_t mutex;
@@ -106,7 +105,7 @@ class DetectionBasedTracker::SeparateDetectionWork
long long timeWhenDetectingThreadStartedWork;
};
DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork(DetectionBasedTracker& _detectionBasedTracker, cv::Ptr<DetectionBasedTracker::IDetector> _detector)
DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork(DetectionBasedTracker& _detectionBasedTracker, const std::string& cascadeFilename)
:detectionBasedTracker(_detectionBasedTracker),
cascadeInThread(),
isObjectDetectingReady(false),
@@ -114,10 +113,9 @@ DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork(DetectionBas
stateThread(STATE_THREAD_STOPPED),
timeWhenDetectingThreadStartedWork(-1)
{
CV_Assert(!_detector.empty());
cascadeInThread = _detector;
if(!cascadeInThread.load(cascadeFilename)) {
CV_Error(CV_StsBadArg, "DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork: Cannot load a cascade from the file '"+cascadeFilename+"'");
}
int res=0;
res=pthread_mutex_init(&mutex, NULL);//TODO: should be attributes?
if (res) {
@@ -276,17 +274,20 @@ void DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector()
int64 t1_detect=getTickCount();
cascadeInThread->detect(imageSeparateDetecting, objects);
int minObjectSize=detectionBasedTracker.parameters.minObjectSize;
Size min_objectSize=Size(minObjectSize, minObjectSize);
/*cascadeInThread.detectMultiScale( imageSeparateDetecting, objects,
int maxObjectSize=detectionBasedTracker.parameters.maxObjectSize;
Size max_objectSize(maxObjectSize, maxObjectSize);
cascadeInThread.detectMultiScale( imageSeparateDetecting, objects,
detectionBasedTracker.parameters.scaleFactor, detectionBasedTracker.parameters.minNeighbors, 0
|CV_HAAR_SCALE_IMAGE
,
min_objectSize,
max_objectSize
);
*/
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- end handling imageSeparateDetecting");
if (!isWorking()) {
@@ -421,10 +422,16 @@ bool DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThrea
DetectionBasedTracker::Parameters::Parameters()
{
minObjectSize=96;
maxObjectSize=INT_MAX;
scaleFactor=1.1;
maxTrackLifetime=5;
minNeighbors=2;
minDetectionPeriod=0;
}
DetectionBasedTracker::InnerParameters::InnerParameters()
{
numLastPositionsToTrack=4;
@@ -437,32 +444,39 @@ DetectionBasedTracker::InnerParameters::InnerParameters()
coeffObjectSpeedUsingInPrediction=0.8;
}
DetectionBasedTracker::DetectionBasedTracker(cv::Ptr<IDetector> MainDetector, cv::Ptr<IDetector> TrackingDetector, const Parameters& params)
DetectionBasedTracker::DetectionBasedTracker(const std::string& cascadeFilename, const Parameters& params)
:separateDetectionWork(),
innerParameters(),
cascadeForTracking(TrackingDetector),
parameters(params),
numTrackedSteps(0)
{
CV_Assert( (params.maxTrackLifetime >= 0)
&& (!MainDetector.empty())
&& (!TrackingDetector.empty()) );
CV_Assert( (params.minObjectSize > 0)
&& (params.maxObjectSize >= 0)
&& (params.scaleFactor > 1.0)
&& (params.maxTrackLifetime >= 0) );
separateDetectionWork = new SeparateDetectionWork(*this, MainDetector);
if (!cascadeForTracking.load(cascadeFilename)) {
CV_Error(CV_StsBadArg, "DetectionBasedTracker::DetectionBasedTracker: Cannot load a cascade from the file '"+cascadeFilename+"'");
}
parameters=params;
separateDetectionWork=new SeparateDetectionWork(*this, cascadeFilename);
weightsPositionsSmoothing.push_back(1);
weightsSizesSmoothing.push_back(0.5);
weightsSizesSmoothing.push_back(0.3);
weightsSizesSmoothing.push_back(0.2);
}
}
DetectionBasedTracker::~DetectionBasedTracker()
{
}
void DetectionBasedTracker::process(const Mat& imageGray)
{
CV_Assert(imageGray.type()==CV_8UC1);
if (!separateDetectionWork->isWorking()) {
@@ -480,9 +494,15 @@ void DetectionBasedTracker::process(const Mat& imageGray)
Mat imageDetect=imageGray;
int D=parameters.minObjectSize;
if (D < 1)
D=1;
vector<Rect> rectsWhereRegions;
bool shouldHandleResult=separateDetectionWork->communicateWithDetectingThread(imageGray, rectsWhereRegions);
if (shouldHandleResult) {
LOGD("DetectionBasedTracker::process: get _rectsWhereRegions were got from resultDetect");
} else {
@@ -497,6 +517,7 @@ void DetectionBasedTracker::process(const Mat& imageGray)
continue;
}
//correction by speed of rectangle
if (n > 1) {
Point2f center=centerRect(r);
@@ -539,7 +560,6 @@ void DetectionBasedTracker::getObjects(std::vector<cv::Rect>& result) const
LOGD("DetectionBasedTracker::process: found a object with SIZE %d x %d, rect={%d, %d, %d x %d}", r.width, r.height, r.x, r.y, r.width, r.height);
}
}
void DetectionBasedTracker::getObjects(std::vector<Object>& result) const
{
result.clear();
@@ -554,6 +574,8 @@ void DetectionBasedTracker::getObjects(std::vector<Object>& result) const
}
}
bool DetectionBasedTracker::run()
{
return separateDetectionWork->run();
@@ -689,7 +711,6 @@ void DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObj
}
}
}
Rect DetectionBasedTracker::calcTrackedObjectPositionToShow(int i) const
{
if ( (i < 0) || (i >= (int)trackedObjects.size()) ) {
@@ -722,8 +743,8 @@ Rect DetectionBasedTracker::calcTrackedObjectPositionToShow(int i) const
double sum=0;
for(int j=0; j < Nsize; j++) {
int k=N-j-1;
w += lastPositions[k].width * weightsSizesSmoothing[j];
h += lastPositions[k].height * weightsSizesSmoothing[j];
w+= lastPositions[k].width * weightsSizesSmoothing[j];
h+= lastPositions[k].height * weightsSizesSmoothing[j];
sum+=weightsSizesSmoothing[j];
}
w /= sum;
@@ -741,7 +762,7 @@ Rect DetectionBasedTracker::calcTrackedObjectPositionToShow(int i) const
Point br(lastPositions[k].br());
Point2f c1;
c1=tl;
c1=c1* 0.5f;
c1=c1* 0.5f;
Point2f c2;
c2=br;
c2=c2*0.5f;
@@ -781,7 +802,8 @@ void DetectionBasedTracker::detectInRegion(const Mat& img, const Rect& r, vector
return;
}
int d = cvRound(std::min(r.width, r.height) * innerParameters.coeffObjectSizeToTrack);
int d=std::min(r.width, r.height);
d=cvRound(d * innerParameters.coeffObjectSizeToTrack);
vector<Rect> tmpobjects;
@@ -789,17 +811,17 @@ void DetectionBasedTracker::detectInRegion(const Mat& img, const Rect& r, vector
LOGD("DetectionBasedTracker::detectInRegion: img1.size()=%d x %d, d=%d",
img1.size().width, img1.size().height, d);
cascadeForTracking->setMinObjectSize(Size(d, d));
cascadeForTracking->detect(img1, tmpobjects);
/*
detectMultiScale( img1, tmpobjects,
int maxObjectSize=parameters.maxObjectSize;
Size max_objectSize(maxObjectSize, maxObjectSize);
cascadeForTracking.detectMultiScale( img1, tmpobjects,
parameters.scaleFactor, parameters.minNeighbors, 0
|CV_HAAR_FIND_BIGGEST_OBJECT
|CV_HAAR_SCALE_IMAGE
,
Size(d,d),
max_objectSize
);*/
);
for(size_t i=0; i < tmpobjects.size(); i++) {
Rect curres(tmpobjects[i].tl() + r1.tl(), tmpobjects[i].size());
@@ -809,7 +831,10 @@ void DetectionBasedTracker::detectInRegion(const Mat& img, const Rect& r, vector
bool DetectionBasedTracker::setParameters(const Parameters& params)
{
if ( params.maxTrackLifetime < 0 )
if ( (params.minObjectSize <= 0)
|| (params.maxObjectSize < 0)
|| (params.scaleFactor <= 1.0)
|| (params.maxTrackLifetime < 0) )
{
LOGE("DetectionBasedTracker::setParameters: ERROR: wrong parameters value");
return false;
@@ -1,104 +0,0 @@
#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/contrib/detection_based_tracker.hpp>
#include <stdio.h>
#include <string>
#include <vector>
using namespace std;
using namespace cv;
const string WindowName = "Face Detection example";
class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
{
public:
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
IDetector(),
Detector(detector)
{
CV_Assert(!detector.empty());
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
{
Detector->detectMultiScale(Image, objects, scaleFactor, minNeighbours, 0, minObjSize, maxObjSize);
}
virtual ~CascadeDetectorAdapter()
{}
private:
CascadeDetectorAdapter();
cv::Ptr<cv::CascadeClassifier> Detector;
};
int main(int argc, char* argv[])
{
namedWindow(WindowName);
VideoCapture VideoStream(0);
if (!VideoStream.isOpened())
{
printf("Error: Cannot open video stream from camera\n");
return 1;
}
std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker Detector(MainDetector, TrackingDetector, params);
if (!Detector.run())
{
printf("Error: Detector initialization failed\n");
return 2;
}
Mat ReferenceFrame;
Mat GrayFrame;
vector<Rect> Faces;
while(true)
{
VideoStream >> ReferenceFrame;
cvtColor(ReferenceFrame, GrayFrame, COLOR_RGB2GRAY);
Detector.process(GrayFrame);
Detector.getObjects(Faces);
for (size_t i = 0; i < Faces.size(); i++)
{
rectangle(ReferenceFrame, Faces[i], CV_RGB(0,255,0));
}
imshow(WindowName, ReferenceFrame);
if (cvWaitKey(30) >= 0) break;
}
Detector.stop();
return 0;
}
#else
#include <stdio.h>
int main()
{
printf("This sample works for UNIX or ANDROID only\n");
return 0;
}
#endif
@@ -43,6 +43,8 @@
#define LOGE(...) do{} while(0)
#endif
using namespace cv;
using namespace std;
@@ -61,32 +63,9 @@ static void usage()
LOGE0("\t (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" ");
}
class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
{
public:
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
IDetector(),
Detector(detector)
{
CV_Assert(!detector.empty());
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
{
Detector->detectMultiScale(Image, objects, 1.1, 3, 0, minObjSize, maxObjSize);
}
virtual ~CascadeDetectorAdapter()
{}
private:
CascadeDetectorAdapter();
cv::Ptr<cv::CascadeClassifier> Detector;
};
static int test_FaceDetector(int argc, char *argv[])
{
if (argc < 4)
{
if (argc < 4) {
usage();
return -1;
}
@@ -101,14 +80,12 @@ static int test_FaceDetector(int argc, char *argv[])
vector<Mat> images;
{
char filename[256];
for(int n=1; ; n++)
{
for(int n=1; ; n++) {
snprintf(filename, sizeof(filename), filepattern, n);
LOGD("filename='%s'", filename);
Mat m0;
m0=imread(filename);
if (m0.empty())
{
if (m0.empty()) {
LOGI0("Cannot read the file --- break");
break;
}
@@ -117,15 +94,10 @@ static int test_FaceDetector(int argc, char *argv[])
LOGD("read %d images", (int)images.size());
}
std::string cascadeFrontalfilename=cascadefile;
cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker fd(MainDetector, TrackingDetector, params);
std::string cascadeFrontalfilename=cascadefile;
DetectionBasedTracker fd(cascadeFrontalfilename, params);
fd.run();
@@ -136,13 +108,12 @@ static int test_FaceDetector(int argc, char *argv[])
double freq=getTickFrequency();
int num_images=images.size();
for(int n=1; n <= num_images; n++)
{
for(int n=1; n <= num_images; n++) {
int64 tcur=getTickCount();
int64 dt=tcur-tprev;
tprev=tcur;
double t_ms=((double)dt)/freq * 1000.0;
LOGD("\n\nSTEP n=%d from prev step %f ms\n", n, t_ms);
LOGD("\n\nSTEP n=%d from prev step %f ms\n\n", n, t_ms);
m=images[n-1];
CV_Assert(! m.empty());
cvtColor(m, gray, CV_BGR2GRAY);
@@ -152,8 +123,11 @@ static int test_FaceDetector(int argc, char *argv[])
vector<Rect> result;
fd.getObjects(result);
for(size_t i=0; i < result.size(); i++)
{
for(size_t i=0; i < result.size(); i++) {
Rect r=result[i];
CV_Assert(r.area() > 0);
Point tl=r.tl();
@@ -162,14 +136,14 @@ static int test_FaceDetector(int argc, char *argv[])
rectangle(m, tl, br, color, 3);
}
}
char outfilename[256];
for(int n=1; n <= num_images; n++)
{
snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
LOGD("outfilename='%s'", outfilename);
m=images[n-1];
imwrite(outfilename, m);
char outfilename[256];
for(int n=1; n <= num_images; n++) {
snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
LOGD("outfilename='%s'", outfilename);
m=images[n-1];
imwrite(outfilename, m);
}
}
fd.stop();
@@ -177,6 +151,8 @@ static int test_FaceDetector(int argc, char *argv[])
return 0;
}
int main(int argc, char *argv[])
{
return test_FaceDetector(argc, argv);