Mirror of https://github.com/opencv/opencv.git

commit 1b7ad93dc9: LBP features integrated in CascadeClassifier_GPU
parent 2dc93574e1
@@ -1397,7 +1397,7 @@ public:
 };

 ////////////////////////////////// CascadeClassifier_GPU //////////////////////////////////////////
-// The cascade classifier class for object detection.
+// The cascade classifier class for object detection: supports old haar and new lbp xml formats and nvbin for haar cascades only.
 class CV_EXPORTS CascadeClassifier_GPU
 {
 public:
@@ -1410,36 +1410,22 @@ public:
     void release();

     /* returns number of detected objects */
-    int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor=1.2, int minNeighbors=4, Size minSize=Size());
+    int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.2, int minNeighbors = 4, Size minSize = Size());

     bool findLargestObject;
     bool visualizeInPlace;

     Size getClassifierSize() const;
-private:

+private:
     struct CascadeClassifierImpl;
     CascadeClassifierImpl* impl;
-};

-// The cascade classifier class for object detection.
-class CV_EXPORTS CascadeClassifier_GPU_LBP
-{
+    struct HaarCascade;
+    struct LbpCascade;
+
+    friend class CascadeClassifier_GPU_LBP;
+
 public:
-    CascadeClassifier_GPU_LBP(cv::Size detectionFrameSize = cv::Size());
-    ~CascadeClassifier_GPU_LBP();
-
-    bool empty() const;
-    bool load(const std::string& filename);
-    void release();
-
-    int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.1, int minNeighbors = 4,
-        cv::Size maxObjectSize = cv::Size()/*, Size minSize = Size()*/);
-    Size getClassifierSize() const;
-
-private:
-    struct CascadeClassifierImpl;
-    CascadeClassifierImpl* impl;
+    int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize = Size(), double scaleFactor = 1.1, int minNeighbors = 4);
 };

 ////////////////////////////////// SURF //////////////////////////////////////////
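The merge collapses CascadeClassifier_GPU_LBP into CascadeClassifier_GPU, so one object now serves both feature types. A minimal usage sketch, assuming an OpenCV build of this era with CUDA enabled; the cascade file name is illustrative:

```cpp
#include <opencv2/gpu/gpu.hpp>

int detectFaces(const cv::gpu::GpuMat& gray)
{
    cv::gpu::CascadeClassifier_GPU cascade;
    // Accepts an LBP xml, an old-style haar xml, or an nvbin (haar only).
    if (!cascade.load("lbpcascade_frontalface.xml"))
        return -1;

    cv::gpu::GpuMat objects;
    // Pre-existing overload, unchanged for haar users:
    int n = cascade.detectMultiScale(gray, objects, 1.2, 4, cv::Size());
    // New overload taking an explicit maximum object size first:
    n = cascade.detectMultiScale(gray, objects, gray.size(), cv::Size(), 1.1, 4);
    return n; // number of detected objects
}
```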
@@ -70,7 +70,7 @@ GPU_PERF_TEST_1(LBPClassifier, cv::gpu::DeviceInfo)

     cv::gpu::GpuMat img(img_host);
     cv::gpu::GpuMat gpu_rects;
-    cv::gpu::CascadeClassifier_GPU_LBP cascade(img.size());
+    cv::gpu::CascadeClassifier_GPU cascade;
     ASSERT_TRUE(cascade.load(perf::TestBase::getDataPath("gpu/lbpcascade/lbpcascade_frontalface.xml")));

     cascade.detectMultiScale(img, gpu_rects);
@@ -49,31 +49,239 @@ using namespace cv::gpu;
 using namespace std;

 #if !defined (HAVE_CUDA)
-// ============ old fashioned haar cascade ==============================================//
 cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU() { throw_nogpu(); }
 cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const string&) { throw_nogpu(); }
 cv::gpu::CascadeClassifier_GPU::~CascadeClassifier_GPU() { throw_nogpu(); }

 bool cv::gpu::CascadeClassifier_GPU::empty() const { throw_nogpu(); return true; }
 bool cv::gpu::CascadeClassifier_GPU::load(const string&) { throw_nogpu(); return true; }
-Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const { throw_nogpu(); return Size(); }
-
-int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat& , GpuMat& , double , int , Size) { throw_nogpu(); return 0; }
-
-// ============ LBP cascade ==============================================//
-cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifier_GPU_LBP(cv::Size /*frameSize*/){ throw_nogpu(); }
-cv::gpu::CascadeClassifier_GPU_LBP::~CascadeClassifier_GPU_LBP() { throw_nogpu(); }
-
-bool cv::gpu::CascadeClassifier_GPU_LBP::empty() const { throw_nogpu(); return true; }
-bool cv::gpu::CascadeClassifier_GPU_LBP::load(const string&) { throw_nogpu(); return true; }
-Size cv::gpu::CascadeClassifier_GPU_LBP::getClassifierSize() const { throw_nogpu(); return Size(); }
-void cv::gpu::CascadeClassifier_GPU_LBP::allocateBuffers(cv::Size /*frame*/) { throw_nogpu();}
-
-int cv::gpu::CascadeClassifier_GPU_LBP::detectMultiScale(const cv::gpu::GpuMat& /*image*/, cv::gpu::GpuMat& /*objectsBuf*/,
-    double /*scaleFactor*/, int /*minNeighbors*/, cv::Size /*maxObjectSize*/){ throw_nogpu(); return 0;}
+Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const { throw_nogpu(); return Size();}
+void cv::gpu::CascadeClassifier_GPU::release() { throw_nogpu(); }
+
+int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, double, int, Size) {throw_nogpu(); return -1;}
+int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, Size, Size, double, int) {throw_nogpu(); return -1;}

 #else
+struct cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
+{
+public:
+    CascadeClassifierImpl(){}
+    virtual ~CascadeClassifierImpl(){}
+
+    virtual unsigned int process(const GpuMat& src, GpuMat& objects, float scaleStep, int minNeighbors,
+        bool findLargestObject, bool visualizeInPlace, cv::Size ncvMinSize, cv::Size maxObjectSize) = 0;
+
+    virtual cv::Size getClassifierCvSize() const = 0;
+    virtual bool read(const string& classifierAsXml) = 0;
+};
+
+struct cv::gpu::CascadeClassifier_GPU::HaarCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
+{
+public:
+    HaarCascade() : lastAllocatedFrameSize(-1, -1)
+    {
+        ncvSetDebugOutputHandler(NCVDebugOutputHandler);
+    }
+
+    bool read(const string& filename)
+    {
+        ncvSafeCall( load(filename) );
+        return true;
+    }
+
+    NCVStatus process(const GpuMat& src, GpuMat& objects, float scaleStep, int minNeighbors,
+        bool findLargestObject, bool visualizeInPlace, cv::Size ncvMinSize,
+        /*out*/unsigned int& numDetections)
+    {
+        calculateMemReqsAndAllocate(src.size());
+
+        NCVMemPtr src_beg;
+        src_beg.ptr = (void*)src.ptr<Ncv8u>();
+        src_beg.memtype = NCVMemoryTypeDevice;
+
+        NCVMemSegment src_seg;
+        src_seg.begin = src_beg;
+        src_seg.size = src.step * src.rows;
+
+        NCVMatrixReuse<Ncv8u> d_src(src_seg, static_cast<int>(devProp.textureAlignment), src.cols, src.rows, static_cast<int>(src.step), true);
+        ncvAssertReturn(d_src.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
+
+        CV_Assert(objects.rows == 1);
+
+        NCVMemPtr objects_beg;
+        objects_beg.ptr = (void*)objects.ptr<NcvRect32u>();
+        objects_beg.memtype = NCVMemoryTypeDevice;
+
+        NCVMemSegment objects_seg;
+        objects_seg.begin = objects_beg;
+        objects_seg.size = objects.step * objects.rows;
+        NCVVectorReuse<NcvRect32u> d_rects(objects_seg, objects.cols);
+        ncvAssertReturn(d_rects.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
+
+        NcvSize32u roi;
+        roi.width = d_src.width();
+        roi.height = d_src.height();
+
+        NcvSize32u winMinSize(ncvMinSize.width, ncvMinSize.height);
+
+        Ncv32u flags = 0;
+        flags |= findLargestObject? NCVPipeObjDet_FindLargestObject : 0;
+        flags |= visualizeInPlace ? NCVPipeObjDet_VisualizeInPlace : 0;
+
+        ncvStat = ncvDetectObjectsMultiScale_device(
+            d_src, roi, d_rects, numDetections, haar, *h_haarStages,
+            *d_haarStages, *d_haarNodes, *d_haarFeatures,
+            winMinSize,
+            minNeighbors,
+            scaleStep, 1,
+            flags,
+            *gpuAllocator, *cpuAllocator, devProp, 0);
+        ncvAssertReturnNcvStat(ncvStat);
+        ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
+
+        return NCV_SUCCESS;
+    }
+
+    unsigned int process(const GpuMat& image, GpuMat& objectsBuf, float scaleFactor, int minNeighbors,
+        bool findLargestObject, bool visualizeInPlace, cv::Size minSize, cv::Size maxObjectSize)
+    {
+        CV_Assert( scaleFactor > 1 && image.depth() == CV_8U);
+
+        const int defaultObjSearchNum = 100;
+        if (objectsBuf.empty())
+        {
+            objectsBuf.create(1, defaultObjSearchNum, DataType<Rect>::type);
+        }
+
+        cv::Size ncvMinSize = this->getClassifierCvSize();
+
+        if (ncvMinSize.width < (unsigned)minSize.width && ncvMinSize.height < (unsigned)minSize.height)
+        {
+            ncvMinSize.width = minSize.width;
+            ncvMinSize.height = minSize.height;
+        }
+
+        unsigned int numDetections;
+        ncvSafeCall(this->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, ncvMinSize, numDetections));
+
+        return numDetections;
+    }
+
+    cv::Size getClassifierCvSize() const { return cv::Size(haar.ClassifierSize.width, haar.ClassifierSize.height); }
+
+private:
+    static void NCVDebugOutputHandler(const std::string &msg) { CV_Error(CV_GpuApiCallError, msg.c_str()); }
+
+    NCVStatus load(const string& classifierFile)
+    {
+        int devId = cv::gpu::getDevice();
+        ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), NCV_CUDA_ERROR);
+
+        // Load the classifier from file (assuming its size is about 1 mb) using a simple allocator
+        gpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeDevice, static_cast<int>(devProp.textureAlignment));
+        cpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, static_cast<int>(devProp.textureAlignment));
+
+        ncvAssertPrintReturn(gpuCascadeAllocator->isInitialized(), "Error creating cascade GPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(cpuCascadeAllocator->isInitialized(), "Error creating cascade CPU allocator", NCV_CUDA_ERROR);
+
+        Ncv32u haarNumStages, haarNumNodes, haarNumFeatures;
+        ncvStat = ncvHaarGetClassifierSize(classifierFile, haarNumStages, haarNumNodes, haarNumFeatures);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error reading classifier size (check the file)", NCV_FILE_ERROR);
+
+        h_haarStages = new NCVVectorAlloc<HaarStage64>(*cpuCascadeAllocator, haarNumStages);
+        h_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*cpuCascadeAllocator, haarNumNodes);
+        h_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*cpuCascadeAllocator, haarNumFeatures);
+
+        ncvAssertPrintReturn(h_haarStages->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(h_haarNodes->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(h_haarFeatures->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
+
+        ncvStat = ncvHaarLoadFromFile_host(classifierFile, haar, *h_haarStages, *h_haarNodes, *h_haarFeatures);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error loading classifier", NCV_FILE_ERROR);
+
+        d_haarStages = new NCVVectorAlloc<HaarStage64>(*gpuCascadeAllocator, haarNumStages);
+        d_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*gpuCascadeAllocator, haarNumNodes);
+        d_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*gpuCascadeAllocator, haarNumFeatures);
+
+        ncvAssertPrintReturn(d_haarStages->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(d_haarNodes->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(d_haarFeatures->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
+
+        ncvStat = h_haarStages->copySolid(*d_haarStages, 0);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
+        ncvStat = h_haarNodes->copySolid(*d_haarNodes, 0);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
+        ncvStat = h_haarFeatures->copySolid(*d_haarFeatures, 0);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
+
+        return NCV_SUCCESS;
+    }
+
+    NCVStatus calculateMemReqsAndAllocate(const Size& frameSize)
+    {
+        if (lastAllocatedFrameSize == frameSize)
+        {
+            return NCV_SUCCESS;
+        }
+
+        // Calculate memory requirements and create real allocators
+        NCVMemStackAllocator gpuCounter(static_cast<int>(devProp.textureAlignment));
+        NCVMemStackAllocator cpuCounter(static_cast<int>(devProp.textureAlignment));
+
+        ncvAssertPrintReturn(gpuCounter.isInitialized(), "Error creating GPU memory counter", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(cpuCounter.isInitialized(), "Error creating CPU memory counter", NCV_CUDA_ERROR);
+
+        NCVMatrixAlloc<Ncv8u> d_src(gpuCounter, frameSize.width, frameSize.height);
+        NCVMatrixAlloc<Ncv8u> h_src(cpuCounter, frameSize.width, frameSize.height);
+
+        ncvAssertReturn(d_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
+        ncvAssertReturn(h_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
+
+        NCVVectorAlloc<NcvRect32u> d_rects(gpuCounter, 100);
+        ncvAssertReturn(d_rects.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
+
+        NcvSize32u roi;
+        roi.width = d_src.width();
+        roi.height = d_src.height();
+        Ncv32u numDetections;
+        ncvStat = ncvDetectObjectsMultiScale_device(d_src, roi, d_rects, numDetections, haar, *h_haarStages,
+            *d_haarStages, *d_haarNodes, *d_haarFeatures, haar.ClassifierSize, 4, 1.2f, 1, 0, gpuCounter, cpuCounter, devProp, 0);

+        ncvAssertReturnNcvStat(ncvStat);
+        ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
+
+        gpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
+        cpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
+
+        ncvAssertPrintReturn(gpuAllocator->isInitialized(), "Error creating GPU memory allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(cpuAllocator->isInitialized(), "Error creating CPU memory allocator", NCV_CUDA_ERROR);
+        return NCV_SUCCESS;
+    }
+
+    cudaDeviceProp devProp;
+    NCVStatus ncvStat;
+
+    Ptr<NCVMemNativeAllocator> gpuCascadeAllocator;
+    Ptr<NCVMemNativeAllocator> cpuCascadeAllocator;
+
+    Ptr<NCVVectorAlloc<HaarStage64> > h_haarStages;
+    Ptr<NCVVectorAlloc<HaarClassifierNode128> > h_haarNodes;
+    Ptr<NCVVectorAlloc<HaarFeature64> > h_haarFeatures;
+
+    HaarClassifierCascadeDescriptor haar;
+
+    Ptr<NCVVectorAlloc<HaarStage64> > d_haarStages;
+    Ptr<NCVVectorAlloc<HaarClassifierNode128> > d_haarNodes;
+    Ptr<NCVVectorAlloc<HaarFeature64> > d_haarFeatures;
+
+    Size lastAllocatedFrameSize;
+
+    Ptr<NCVMemStackAllocator> gpuAllocator;
+    Ptr<NCVMemStackAllocator> cpuAllocator;
+
+    virtual ~HaarCascade(){}
+};

 cv::Size operator -(const cv::Size& a, const cv::Size& b)
 {
     return cv::Size(a.width - b.width, a.height - b.height);
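The structs above introduce a polymorphic pimpl: the public class owns one CascadeClassifierImpl pointer, and HaarCascade / LbpCascade override a shared virtual interface. A stripped-down sketch of that shape, with hypothetical names and elided parameters, not the real signatures:

```cpp
#include <string>

struct Detector
{
    struct Impl
    {
        virtual ~Impl() {}
        virtual bool read(const std::string& file) = 0;
        virtual unsigned int process() = 0; // image/params elided
    };

    struct Haar : Impl   // stands in for the NCV-based backend
    {
        bool read(const std::string&) { return true; }
        unsigned int process() { return 0; }
    };

    struct Lbp : Impl    // stands in for the new LBP backend
    {
        bool read(const std::string&) { return true; }
        unsigned int process() { return 0; }
    };

    Impl* impl; // chosen once at load() time, dispatched through virtuals afterwards
};

int main()
{
    Detector d;
    d.impl = new Detector::Lbp();
    unsigned int n = d.impl->process();
    delete d.impl;
    return (int)n;
}
```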
@@ -101,12 +309,17 @@ bool operator <=(const cv::Size& a, const cv::Size& b)

 struct PyrLavel
 {
-    PyrLavel(int _order, float _scale, cv::Size frame, cv::Size window) : order(_order)
+    PyrLavel(int _order, float _scale, cv::Size frame, cv::Size window, cv::Size minObjectSize)
     {
+        do
+        {
+            order = _order;
         scale = pow(_scale, order);
         sFrame = frame / scale;
         workArea = sFrame - window + 1;
         sWindow = window * scale;
+            _order++;
+        } while (sWindow <= minObjectSize);
     }

     bool isFeasible(cv::Size maxObj)
@@ -114,9 +327,9 @@ struct PyrLavel
         return workArea.width > 0 && workArea.height > 0 && sWindow <= maxObj;
     }

-    PyrLavel next(float factor, cv::Size frame, cv::Size window)
+    PyrLavel next(float factor, cv::Size frame, cv::Size window, cv::Size minObjectSize)
     {
-        return PyrLavel(order + 1, factor, frame, window);
+        return PyrLavel(order + 1, factor, frame, window, minObjectSize);
     }

     int order;
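The reworked constructor loops forward until the scaled window clears minObjectSize, so levels that could only find too-small objects are skipped up front. A self-contained sketch of the arithmetic, with made-up numbers (640-wide frame, 24x24 window, factor 1.2, 48x48 minimum object size):

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    const double factor = 1.2;
    const int frameW = 640, winW = 24, minObjW = 48;

    // Mirrors the do/while in the constructor: advance until the scaled
    // window is larger than the requested minimum object size.
    int order = 0;
    double scale, sWindow;
    do
    {
        scale = std::pow(factor, order);
        sWindow = winW * scale;
        ++order;
    } while (sWindow <= minObjW);

    // First level actually evaluated: order 4, scale about 2.07.
    std::printf("first level: order=%d scale=%.2f window=%.1f workArea=%.0f\n",
                order - 1, scale, sWindow, frameW / scale - winW + 1);
    return 0;
}
```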
@@ -152,7 +365,7 @@ namespace cv { namespace gpu { namespace device
     }
 }}}

-struct cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifierImpl
+struct cv::gpu::CascadeClassifier_GPU::LbpCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
 {
 public:
     struct Stage
@@ -162,44 +375,98 @@ public:
         float threshold;
     };

-    bool read(const FileNode &root);
-    void allocateBuffers(cv::Size frame = cv::Size());
-    bool empty() const {return stage_mat.empty();}
+    LbpCascade(){}
+    virtual ~LbpCascade(){}

-    int process(const GpuMat& image, GpuMat& objects, double scaleFactor, int groupThreshold, cv::Size maxObjectSize);
+    virtual unsigned int process(const GpuMat& image, GpuMat& objects, float scaleFactor, int groupThreshold, bool findLargestObject,
+        bool visualizeInPlace, cv::Size minObjectSize, cv::Size maxObjectSize)
+    {
+        CV_Assert(scaleFactor > 1 && image.depth() == CV_8U);
+
+        const int defaultObjSearchNum = 100;
+        const float grouping_eps = 0.2f;
+
+        if( !objects.empty() && objects.depth() == CV_32S)
+            objects.reshape(4, 1);
+        else
+            objects.create(1 , image.cols >> 4, CV_32SC4);
+
+        // used for debug
+        // candidates.setTo(cv::Scalar::all(0));
+        // objects.setTo(cv::Scalar::all(0));
+
+        if (maxObjectSize == cv::Size())
+            maxObjectSize = image.size();
+
+        allocateBuffers(image.size());
+
+        unsigned int classified = 0;
+        GpuMat dclassified(1, 1, CV_32S);
+        cudaSafeCall( cudaMemcpy(dclassified.ptr(), &classified, sizeof(int), cudaMemcpyHostToDevice) );
+
+        PyrLavel level(0, 1.0f, image.size(), NxM, minObjectSize);
+
+        while (level.isFeasible(maxObjectSize))
+        {
+            int acc = level.sFrame.width + 1;
+            float iniScale = level.scale;
+
+            cv::Size area = level.workArea;
+            int step = 1 + (level.scale <= 2.f);
+
+            int total = 0, prev = 0;
+
+            while (acc <= integralFactor * (image.cols + 1) && level.isFeasible(maxObjectSize))
+            {
+                // create suitable matrix headers
+                GpuMat src = resuzeBuffer(cv::Rect(0, 0, level.sFrame.width, level.sFrame.height));
+                GpuMat sint = integral(cv::Rect(prev, 0, level.sFrame.width + 1, level.sFrame.height + 1));
+                GpuMat buff = integralBuffer;
+
+                // generate integral for scale
+                gpu::resize(image, src, level.sFrame, 0, 0, CV_INTER_LINEAR);
+                gpu::integralBuffered(src, sint, buff);
+
+                // calculate job
+                int totalWidth = level.workArea.width / step;
+                total += totalWidth * (level.workArea.height / step);
+
+                // go to next pyramid level
+                level = level.next(scaleFactor, image.size(), NxM, minObjectSize);
+                area = level.workArea;
+
+                step = (1 + (level.scale <= 2.f));
+                prev = acc;
+                acc += level.sFrame.width + 1;
+            }
+
+            device::lbp::classifyPyramid(image.cols, image.rows, NxM.width - 1, NxM.height - 1, iniScale, scaleFactor, total, stage_mat, stage_mat.cols / sizeof(Stage), nodes_mat,
+                leaves_mat, subsets_mat, features_mat, subsetSize, candidates, dclassified.ptr<unsigned int>(), integral);
+        }
+
+        if (groupThreshold <= 0 || objects.empty())
+            return 0;
+
+        cudaSafeCall( cudaMemcpy(&classified, dclassified.ptr(), sizeof(int), cudaMemcpyDeviceToHost) );
+        device::lbp::connectedConmonents(candidates, classified, objects, groupThreshold, grouping_eps, dclassified.ptr<unsigned int>());
+
+        cudaSafeCall( cudaMemcpy(&classified, dclassified.ptr(), sizeof(int), cudaMemcpyDeviceToHost) );
+        cudaSafeCall( cudaDeviceSynchronize() );
+        return classified;
+    }
+
+    virtual cv::Size getClassifierCvSize() const { return NxM; }
+
+    bool read(const string& classifierAsXml)
+    {
+        FileStorage fs(classifierAsXml, FileStorage::READ);
+        return fs.isOpened() ? read(fs.getFirstTopLevelNode()) : false;
+    }

 private:

-    enum stage { BOOST = 0 };
-    enum feature { LBP = 0 };
-
-    static const stage stageType = BOOST;
-    static const feature featureType = LBP;
-
-    cv::Size NxM;
-    bool isStumps;
-    int ncategories;
-    int subsetSize;
-    int nodeStep;
-
-    // gpu representation of classifier
-    GpuMat stage_mat;
-    GpuMat trees_mat;
-    GpuMat nodes_mat;
-    GpuMat leaves_mat;
-    GpuMat subsets_mat;
-    GpuMat features_mat;
-
-    GpuMat integral;
-    GpuMat integralBuffer;
-    GpuMat resuzeBuffer;
-
-    GpuMat candidates;
-    static const int integralFactor = 4;
-};
-
-void cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifierImpl::allocateBuffers(cv::Size frame)
-{
+    void allocateBuffers(cv::Size frame)
+    {
         if (frame == cv::Size())
             return;
@@ -221,11 +488,10 @@ void cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifierImpl::allocateBuffers(

         candidates.create(1 , frame.width >> 1, CV_32SC4);
     }
     }

-    // currently only stump based boost classifiers are supported
-    bool CascadeClassifier_GPU_LBP::CascadeClassifierImpl::read(const FileNode &root)
-    {
+    bool read(const FileNode &root)
+    {
     const char *GPU_CC_STAGE_TYPE = "stageType";
     const char *GPU_CC_FEATURE_TYPE = "featureType";
     const char *GPU_CC_BOOST = "BOOST";
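The inner while loop in LbpCascade::process above batches pyramid levels: each scaled frame's integral image is written side by side into one wide buffer (prev/acc track the running column offset), and the batch closes once the packed width would exceed integralFactor * (image.cols + 1). A rough standalone sketch of that accounting, with illustrative numbers:

```cpp
#include <cstdio>

int main()
{
    const int integralFactor = 4, cols = 640, winW = 24;
    const float factor = 1.2f;

    float scale = 1.0f;  // the real code starts at the first feasible level
    int acc = 0, levels = 0;
    while (cols / scale >= winW &&
           acc + int(cols / scale) + 1 <= integralFactor * (cols + 1))
    {
        acc += int(cols / scale) + 1;  // one integral image is (width + 1) columns
        scale *= factor;
        ++levels;
    }
    // All batched levels are then classified by a single classifyPyramid launch.
    std::printf("%d levels, %d columns packed (limit %d)\n",
                levels, acc, integralFactor * (cols + 1));
    return 0;
}
```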
@@ -363,336 +629,97 @@ bool CascadeClassifier_GPU_LBP::CascadeClassifierImpl::read(const FileNode &root
     features_mat.upload(cv::Mat(features).reshape(4,1));

     return true;
-}
+    }

-int cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifierImpl::process(const GpuMat& image, GpuMat& objects, double scaleFactor, int groupThreshold, cv::Size maxObjectSize)
-{
-    CV_Assert(!empty() && scaleFactor > 1 && image.depth() == CV_8U);
-
-    const int defaultObjSearchNum = 100;
-    const float grouping_eps = 0.2f;
-
-    if( !objects.empty() && objects.depth() == CV_32S)
-        objects.reshape(4, 1);
-    else
-        objects.create(1 , image.cols >> 4, CV_32SC4);
-
-    // used for debug
-    // candidates.setTo(cv::Scalar::all(0));
-    // objects.setTo(cv::Scalar::all(0));
-
-    if (maxObjectSize == cv::Size())
-        maxObjectSize = image.size();
-
-    allocateBuffers(image.size());
-
-    unsigned int classified = 0;
-    GpuMat dclassified(1, 1, CV_32S);
-    cudaSafeCall( cudaMemcpy(dclassified.ptr(), &classified, sizeof(int), cudaMemcpyHostToDevice) );
-
-    PyrLavel level(0, 1.0f, image.size(), NxM);
-
-    while (level.isFeasible(maxObjectSize))
-    {
-        int acc = level.sFrame.width + 1;
-        float iniScale = level.scale;
-
-        cv::Size area = level.workArea;
-        int step = 1 + (level.scale <= 2.f);
-
-        int total = 0, prev = 0;
-
-        while (acc <= integralFactor * (image.cols + 1) && level.isFeasible(maxObjectSize))
-        {
-            // create sutable matrix headers
-            GpuMat src = resuzeBuffer(cv::Rect(0, 0, level.sFrame.width, level.sFrame.height));
-            GpuMat sint = integral(cv::Rect(prev, 0, level.sFrame.width + 1, level.sFrame.height + 1));
-            GpuMat buff = integralBuffer;
-
-            // generate integral for scale
-            gpu::resize(image, src, level.sFrame, 0, 0, CV_INTER_LINEAR);
-            gpu::integralBuffered(src, sint, buff);
-
-            // calculate job
-            int totalWidth = level.workArea.width / step;
-            // totalWidth = ((totalWidth + WARP_MASK) / WARP_SIZE) << WARP_LOG;
-
-            total += totalWidth * (level.workArea.height / step);
-
-            // go to next pyramide level
-            level = level.next(scaleFactor, image.size(), NxM);
-            area = level.workArea;
-
-            step = (1 + (level.scale <= 2.f));
-            prev = acc;
-            acc += level.sFrame.width + 1;
-        }
-
-        device::lbp::classifyPyramid(image.cols, image.rows, NxM.width - 1, NxM.height - 1, iniScale, scaleFactor, total, stage_mat, stage_mat.cols / sizeof(Stage), nodes_mat,
-            leaves_mat, subsets_mat, features_mat, subsetSize, candidates, dclassified.ptr<unsigned int>(), integral);
-    }
-
-    if (groupThreshold <= 0 || objects.empty())
-        return 0;
-
-    cudaSafeCall( cudaMemcpy(&classified, dclassified.ptr(), sizeof(int), cudaMemcpyDeviceToHost) );
-    device::lbp::connectedConmonents(candidates, classified, objects, groupThreshold, grouping_eps, dclassified.ptr<unsigned int>());
-
-    // candidates.copyTo(objects);
-    cudaSafeCall( cudaMemcpy(&classified, dclassified.ptr(), sizeof(int), cudaMemcpyDeviceToHost) );
-    cudaSafeCall( cudaDeviceSynchronize() );
-    return classified;
-}
-
-cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifier_GPU_LBP(cv::Size detectionFrameSize) : impl(new CascadeClassifierImpl()) { (*impl).allocateBuffers(detectionFrameSize); }
-cv::gpu::CascadeClassifier_GPU_LBP::~CascadeClassifier_GPU_LBP(){ delete impl; }
-
-bool cv::gpu::CascadeClassifier_GPU_LBP::empty() const
-{
-    return (*impl).empty();
-}
-
-bool cv::gpu::CascadeClassifier_GPU_LBP::load(const string& classifierAsXml)
-{
-    FileStorage fs(classifierAsXml, FileStorage::READ);
-    return fs.isOpened() ? (*impl).read(fs.getFirstTopLevelNode()) : false;
-}
-
-int cv::gpu::CascadeClassifier_GPU_LBP::detectMultiScale(const GpuMat& image, GpuMat& objects, double scaleFactor, int groupThreshold, cv::Size maxObjectSize)
-{
-    return (*impl).process(image, objects, scaleFactor, groupThreshold, maxObjectSize);
-}
-
-// ============ old fashioned haar cascade ==============================================//
-struct cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
-{
-    CascadeClassifierImpl(const string& filename) : lastAllocatedFrameSize(-1, -1)
-    {
-        ncvSetDebugOutputHandler(NCVDebugOutputHandler);
-        ncvSafeCall( load(filename) );
-    }
-
-    NCVStatus process(const GpuMat& src, GpuMat& objects, float scaleStep, int minNeighbors,
-        bool findLargestObject, bool visualizeInPlace, NcvSize32u ncvMinSize,
-        /*out*/unsigned int& numDetections)
-    {
-        calculateMemReqsAndAllocate(src.size());
-
-        NCVMemPtr src_beg;
-        src_beg.ptr = (void*)src.ptr<Ncv8u>();
-        src_beg.memtype = NCVMemoryTypeDevice;
-
-        NCVMemSegment src_seg;
-        src_seg.begin = src_beg;
-        src_seg.size = src.step * src.rows;
-
-        NCVMatrixReuse<Ncv8u> d_src(src_seg, static_cast<int>(devProp.textureAlignment), src.cols, src.rows, static_cast<int>(src.step), true);
-        ncvAssertReturn(d_src.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
-
-        CV_Assert(objects.rows == 1);
-
-        NCVMemPtr objects_beg;
-        objects_beg.ptr = (void*)objects.ptr<NcvRect32u>();
-        objects_beg.memtype = NCVMemoryTypeDevice;
-
-        NCVMemSegment objects_seg;
-        objects_seg.begin = objects_beg;
-        objects_seg.size = objects.step * objects.rows;
-        NCVVectorReuse<NcvRect32u> d_rects(objects_seg, objects.cols);
-        ncvAssertReturn(d_rects.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
-
-        NcvSize32u roi;
-        roi.width = d_src.width();
-        roi.height = d_src.height();
-
-        Ncv32u flags = 0;
-        flags |= findLargestObject? NCVPipeObjDet_FindLargestObject : 0;
-        flags |= visualizeInPlace ? NCVPipeObjDet_VisualizeInPlace : 0;
-
-        ncvStat = ncvDetectObjectsMultiScale_device(
-            d_src, roi, d_rects, numDetections, haar, *h_haarStages,
-            *d_haarStages, *d_haarNodes, *d_haarFeatures,
-            ncvMinSize,
-            minNeighbors,
-            scaleStep, 1,
-            flags,
-            *gpuAllocator, *cpuAllocator, devProp, 0);
-        ncvAssertReturnNcvStat(ncvStat);
-        ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
-
-        return NCV_SUCCESS;
-    }
-
-    NcvSize32u getClassifierSize() const { return haar.ClassifierSize; }
-    cv::Size getClassifierCvSize() const { return cv::Size(haar.ClassifierSize.width, haar.ClassifierSize.height); }
-
-private:
-    static void NCVDebugOutputHandler(const std::string &msg) { CV_Error(CV_GpuApiCallError, msg.c_str()); }
-
-    NCVStatus load(const string& classifierFile)
-    {
-        int devId = cv::gpu::getDevice();
-        ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), NCV_CUDA_ERROR);
-
-        // Load the classifier from file (assuming its size is about 1 mb) using a simple allocator
-        gpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeDevice, static_cast<int>(devProp.textureAlignment));
-        cpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, static_cast<int>(devProp.textureAlignment));
-
-        ncvAssertPrintReturn(gpuCascadeAllocator->isInitialized(), "Error creating cascade GPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(cpuCascadeAllocator->isInitialized(), "Error creating cascade CPU allocator", NCV_CUDA_ERROR);
-
-        Ncv32u haarNumStages, haarNumNodes, haarNumFeatures;
-        ncvStat = ncvHaarGetClassifierSize(classifierFile, haarNumStages, haarNumNodes, haarNumFeatures);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error reading classifier size (check the file)", NCV_FILE_ERROR);
-
-        h_haarStages = new NCVVectorAlloc<HaarStage64>(*cpuCascadeAllocator, haarNumStages);
-        h_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*cpuCascadeAllocator, haarNumNodes);
-        h_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*cpuCascadeAllocator, haarNumFeatures);
-
-        ncvAssertPrintReturn(h_haarStages->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(h_haarNodes->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(h_haarFeatures->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
-
-        ncvStat = ncvHaarLoadFromFile_host(classifierFile, haar, *h_haarStages, *h_haarNodes, *h_haarFeatures);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error loading classifier", NCV_FILE_ERROR);
-
-        d_haarStages = new NCVVectorAlloc<HaarStage64>(*gpuCascadeAllocator, haarNumStages);
-        d_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*gpuCascadeAllocator, haarNumNodes);
-        d_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*gpuCascadeAllocator, haarNumFeatures);
-
-        ncvAssertPrintReturn(d_haarStages->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(d_haarNodes->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(d_haarFeatures->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
-
-        ncvStat = h_haarStages->copySolid(*d_haarStages, 0);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
-        ncvStat = h_haarNodes->copySolid(*d_haarNodes, 0);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
-        ncvStat = h_haarFeatures->copySolid(*d_haarFeatures, 0);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
-
-        return NCV_SUCCESS;
-    }
-
-    NCVStatus calculateMemReqsAndAllocate(const Size& frameSize)
-    {
-        if (lastAllocatedFrameSize == frameSize)
-        {
-            return NCV_SUCCESS;
-        }
-
-        // Calculate memory requirements and create real allocators
-        NCVMemStackAllocator gpuCounter(static_cast<int>(devProp.textureAlignment));
-        NCVMemStackAllocator cpuCounter(static_cast<int>(devProp.textureAlignment));
-
-        ncvAssertPrintReturn(gpuCounter.isInitialized(), "Error creating GPU memory counter", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(cpuCounter.isInitialized(), "Error creating CPU memory counter", NCV_CUDA_ERROR);
-
-        NCVMatrixAlloc<Ncv8u> d_src(gpuCounter, frameSize.width, frameSize.height);
-        NCVMatrixAlloc<Ncv8u> h_src(cpuCounter, frameSize.width, frameSize.height);
-
-        ncvAssertReturn(d_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
-        ncvAssertReturn(h_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
-
-        NCVVectorAlloc<NcvRect32u> d_rects(gpuCounter, 100);
-        ncvAssertReturn(d_rects.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
-
-        NcvSize32u roi;
-        roi.width = d_src.width();
-        roi.height = d_src.height();
-        Ncv32u numDetections;
-        ncvStat = ncvDetectObjectsMultiScale_device(d_src, roi, d_rects, numDetections, haar, *h_haarStages,
-            *d_haarStages, *d_haarNodes, *d_haarFeatures, haar.ClassifierSize, 4, 1.2f, 1, 0, gpuCounter, cpuCounter, devProp, 0);
-
-        ncvAssertReturnNcvStat(ncvStat);
-        ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
-
-        gpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
-        cpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
-
-        ncvAssertPrintReturn(gpuAllocator->isInitialized(), "Error creating GPU memory allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(cpuAllocator->isInitialized(), "Error creating CPU memory allocator", NCV_CUDA_ERROR);
-        return NCV_SUCCESS;
-    }
-
-    cudaDeviceProp devProp;
-    NCVStatus ncvStat;
-
-    Ptr<NCVMemNativeAllocator> gpuCascadeAllocator;
-    Ptr<NCVMemNativeAllocator> cpuCascadeAllocator;
-
-    Ptr<NCVVectorAlloc<HaarStage64> > h_haarStages;
-    Ptr<NCVVectorAlloc<HaarClassifierNode128> > h_haarNodes;
-    Ptr<NCVVectorAlloc<HaarFeature64> > h_haarFeatures;
-
-    HaarClassifierCascadeDescriptor haar;
-
-    Ptr<NCVVectorAlloc<HaarStage64> > d_haarStages;
-    Ptr<NCVVectorAlloc<HaarClassifierNode128> > d_haarNodes;
-    Ptr<NCVVectorAlloc<HaarFeature64> > d_haarFeatures;
-
-    Size lastAllocatedFrameSize;
-
-    Ptr<NCVMemStackAllocator> gpuAllocator;
-    Ptr<NCVMemStackAllocator> cpuAllocator;
+    enum stage { BOOST = 0 };
+    enum feature { LBP = 1, HAAR = 2 };
+    static const stage stageType = BOOST;
+    static const feature featureType = LBP;
+
+    cv::Size NxM;
+    bool isStumps;
+    int ncategories;
+    int subsetSize;
+    int nodeStep;
+
+    // gpu representation of classifier
+    GpuMat stage_mat;
+    GpuMat trees_mat;
+    GpuMat nodes_mat;
+    GpuMat leaves_mat;
+    GpuMat subsets_mat;
+    GpuMat features_mat;
+
+    GpuMat integral;
+    GpuMat integralBuffer;
+    GpuMat resuzeBuffer;
+
+    GpuMat candidates;
+    static const int integralFactor = 4;
 };

-cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU() : findLargestObject(false), visualizeInPlace(false), impl(0) {}
-cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const string& filename) : findLargestObject(false), visualizeInPlace(false), impl(0) { load(filename); }
+cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU()
+    : findLargestObject(false), visualizeInPlace(false), impl(0) {}
+
+cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const string& filename)
+    : findLargestObject(false), visualizeInPlace(false), impl(0) { load(filename); }
+
 cv::gpu::CascadeClassifier_GPU::~CascadeClassifier_GPU() { release(); }
-bool cv::gpu::CascadeClassifier_GPU::empty() const { return impl == 0; }
 void cv::gpu::CascadeClassifier_GPU::release() { if (impl) { delete impl; impl = 0; } }

-bool cv::gpu::CascadeClassifier_GPU::load(const string& filename)
-{
-    release();
-    impl = new CascadeClassifierImpl(filename);
-    return !this->empty();
-}
+bool cv::gpu::CascadeClassifier_GPU::empty() const { return impl == 0; }

 Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const
 {
     return this->empty() ? Size() : impl->getClassifierCvSize();
 }

 int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
 {
-    CV_Assert( scaleFactor > 1 && image.depth() == CV_8U);
     CV_Assert( !this->empty());
-
-    const int defaultObjSearchNum = 100;
-    if (objectsBuf.empty())
-    {
-        objectsBuf.create(1, defaultObjSearchNum, DataType<Rect>::type);
-    }
-
-    NcvSize32u ncvMinSize = impl->getClassifierSize();
-
-    if (ncvMinSize.width < (unsigned)minSize.width && ncvMinSize.height < (unsigned)minSize.height)
-    {
-        ncvMinSize.width = minSize.width;
-        ncvMinSize.height = minSize.height;
-    }
-
-    unsigned int numDetections;
-    ncvSafeCall( impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, ncvMinSize, numDetections) );
-
-    return numDetections;
+    return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, cv::Size());
 }

+int cv::gpu::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize, double scaleFactor, int minNeighbors)
+{
+    CV_Assert( !this->empty());
+    return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, maxObjectSize);
+}
+
+bool cv::gpu::CascadeClassifier_GPU::load(const string& filename)
+{
+    release();
+
+    std::string fext = filename.substr(filename.find_last_of(".") + 1);
+    std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower);
+
+    if (fext == "nvbin")
+    {
+        impl = new HaarCascade();
+        return impl->read(filename);
+    }
+
+    FileStorage fs(filename, FileStorage::READ);
+
+    if (!fs.isOpened())
+    {
+        impl = new HaarCascade();
+        return impl->read(filename);
+    }
+
+    const char *GPU_CC_LBP = "LBP";
+    string featureTypeStr = (string)fs.getFirstTopLevelNode()["featureType"];
+    if (featureTypeStr == GPU_CC_LBP)
+        impl = new LbpCascade();
+    else
+        impl = new HaarCascade();
+
+    impl->read(filename);
+    return !this->empty();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+
 struct RectConvert
 {
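The new load() above chooses the backend from the file itself: an nvbin extension or a file that does not parse as xml goes to HaarCascade, while an xml whose top-level featureType node reads "LBP" gets LbpCascade. A condensed sketch of that decision, with hypothetical names (xmlFeatureType stands in for the FileStorage lookup):

```cpp
#include <algorithm>
#include <string>

enum Backend { BACKEND_HAAR, BACKEND_LBP };

// xmlFeatureType stands in for fs.getFirstTopLevelNode()["featureType"];
// pass an empty string when the file could not be opened as xml.
Backend pickBackend(std::string filename, const std::string& xmlFeatureType)
{
    std::string fext = filename.substr(filename.find_last_of(".") + 1);
    std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower);

    if (fext == "nvbin")
        return BACKEND_HAAR;     // serialized haar cascades only
    if (xmlFeatureType == "LBP")
        return BACKEND_LBP;      // new-style lbp xml
    return BACKEND_HAAR;         // old haar xml, or anything unreadable
}

int main()
{
    return pickBackend("lbpcascade_frontalface.xml", "LBP") == BACKEND_LBP ? 0 : 1;
}
```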
@@ -708,7 +735,6 @@ struct RectConvert
     }
 };

-
 void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights)
 {
     vector<Rect> rects(hypotheses.size());
@@ -290,6 +290,7 @@ namespace cv { namespace gpu { namespace device
     {
         const int block = 128;
         int grid = divUp(workAmount, block);
+        cudaFuncSetCacheConfig(lbp_cascade, cudaFuncCachePreferL1);
         Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), (uchar4*)mfeatures.ptr(), subsetSize);
         lbp_cascade<<<grid, block>>>(cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), integral.step / sizeof(int), objects, classified);
     }
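This hunk's one functional change asks the CUDA runtime to favor L1 cache over shared memory for the classifier kernel. cudaFuncSetCacheConfig with cudaFuncCachePreferL1 is a real runtime hint; the motivation suggested here (a kernel limited by scattered integral-image loads rather than shared-memory use) is an assumption, not stated by the commit. A minimal sketch with a dummy kernel, not the real lbp_cascade:

```cpp
#include <cuda_runtime.h>

__global__ void dummyKernel(int* out) { out[threadIdx.x] = threadIdx.x; }

void configureAndLaunch(int* d_out)
{
    // Per-kernel hint; on Fermi-class GPUs of this era it selects the
    // 48 KB L1 / 16 KB shared split of the on-chip memory.
    cudaFuncSetCacheConfig(dummyKernel, cudaFuncCachePreferL1);
    dummyKernel<<<1, 32>>>(d_out);
}
```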
@@ -302,7 +302,7 @@ PARAM_TEST_CASE(LBP_Read_classifier, cv::gpu::DeviceInfo, int)

 TEST_P(LBP_Read_classifier, Accuracy)
 {
-    cv::gpu::CascadeClassifier_GPU_LBP classifier;
+    cv::gpu::CascadeClassifier_GPU classifier;
     std::string classifierXmlPath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/lbpcascade_frontalface.xml";
     ASSERT_TRUE(classifier.load(classifierXmlPath));
 }
@@ -344,7 +344,7 @@ TEST_P(LBP_classify, Accuracy)
     for (; it != rects.end(); ++it)
         cv::rectangle(markedImage, *it, CV_RGB(0, 0, 255));

-    cv::gpu::CascadeClassifier_GPU_LBP gpuClassifier;
+    cv::gpu::CascadeClassifier_GPU gpuClassifier;
     ASSERT_TRUE(gpuClassifier.load(classifierXmlPath));

     cv::gpu::GpuMat gpu_rects;
@@ -359,8 +359,8 @@ TEST_P(LBP_classify, Accuracy)

 #if defined (LOG_CASCADE_STATISTIC)
         std::cout << r.x << " " << r.y << " " << r.width << " " << r.height << std::endl;
-#endif
         cv::rectangle(markedImage, r , CV_RGB(255, 0, 0));
+#endif
     }

 #if defined (LOG_CASCADE_STATISTIC)