2010-12-17 23:41:26 +08:00
|
|
|
/*M///////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
|
|
|
//
|
|
|
|
// By downloading, copying, installing or using the software you agree to this license.
|
|
|
|
// If you do not agree to this license, do not download, install,
|
|
|
|
// copy or use the software.
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// License Agreement
|
|
|
|
// For Open Source Computer Vision Library
|
|
|
|
//
|
|
|
|
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
|
|
|
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
|
|
|
// Third party copyrights are property of their respective owners.
|
|
|
|
//
|
|
|
|
// Redistribution and use in source and binary forms, with or without modification,
|
|
|
|
// are permitted provided that the following conditions are met:
|
|
|
|
//
|
|
|
|
// * Redistribution's of source code must retain the above copyright notice,
|
|
|
|
// this list of conditions and the following disclaimer.
|
|
|
|
//
|
|
|
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
|
|
|
// this list of conditions and the following disclaimer in the documentation
|
|
|
|
// and/or other materials provided with the distribution.
|
|
|
|
//
|
|
|
|
// * The name of the copyright holders may not be used to endorse or promote products
|
|
|
|
// derived from this software without specific prior written permission.
|
|
|
|
//
|
|
|
|
// This software is provided by the copyright holders and contributors "as is" and
|
|
|
|
// any express or implied warranties, including, but not limited to, the implied
|
|
|
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
|
|
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
|
|
|
// indirect, incidental, special, exemplary, or consequential damages
|
|
|
|
// (including, but not limited to, procurement of substitute goods or services;
|
|
|
|
// loss of use, data, or profits; or business interruption) however caused
|
|
|
|
// and on any theory of liability, whether in contract, strict liability,
|
|
|
|
// or tort (including negligence or otherwise) arising in any way out of
|
|
|
|
// the use of this software, even if advised of the possibility of such damage.
|
|
|
|
//
|
|
|
|
//M*/
|
|
|
|
|
|
|
|
#include "precomp.hpp"
|
2012-06-22 23:00:36 +08:00
|
|
|
#include <vector>
|
2012-06-25 16:46:34 +08:00
|
|
|
#include <iostream>
|
2010-12-17 23:41:26 +08:00
|
|
|
|
|
|
|
using namespace cv;
|
|
|
|
using namespace cv::gpu;
|
|
|
|
using namespace std;
|
|
|
|
|
2012-06-22 23:00:36 +08:00
|
|
|
#if !defined (HAVE_CUDA)
|
|
|
|
// ============ old fashioned haar cascade ==============================================//
// CPU-only build stubs: when OpenCV is compiled without CUDA every entry
// point of CascadeClassifier_GPU raises the "no GPU support" error via
// throw_nogpu(). The return statements after throw_nogpu() are never
// reached; they only silence compiler warnings.
cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU() { throw_nogpu(); }
cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const string&) { throw_nogpu(); }
cv::gpu::CascadeClassifier_GPU::~CascadeClassifier_GPU() { throw_nogpu(); }

bool cv::gpu::CascadeClassifier_GPU::empty() const { throw_nogpu(); return true; }
bool cv::gpu::CascadeClassifier_GPU::load(const string&) { throw_nogpu(); return true; }
Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const { throw_nogpu(); return Size(); }

int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat& , GpuMat& , double , int , Size) { throw_nogpu(); return 0; }
|
2010-12-17 23:41:26 +08:00
|
|
|
|
2012-06-22 23:00:36 +08:00
|
|
|
// ============ LBP cascade ==============================================//
// CPU-only build stubs for the LBP cascade: every method raises the
// "no GPU support" error via throw_nogpu(). Values returned after
// throw_nogpu() are unreachable and exist only to satisfy the compiler.
cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifier_GPU_LBP(cv::Size /*frameSize*/){ throw_nogpu(); }
cv::gpu::CascadeClassifier_GPU_LBP::~CascadeClassifier_GPU_LBP() { throw_nogpu(); }

bool cv::gpu::CascadeClassifier_GPU_LBP::empty() const { throw_nogpu(); return true; }
bool cv::gpu::CascadeClassifier_GPU_LBP::load(const string&) { throw_nogpu(); return true; }
Size cv::gpu::CascadeClassifier_GPU_LBP::getClassifierSize() const { throw_nogpu(); return Size(); }
void cv::gpu::CascadeClassifier_GPU_LBP::preallocateIntegralBuffer(cv::Size /*desired*/) { throw_nogpu();}
// NOTE(review): this stub branch declares initializeBuffers while the CUDA
// branch below defines allocateBuffers — verify both match the class header.
void cv::gpu::CascadeClassifier_GPU_LBP::initializeBuffers(cv::Size /*frame*/) { throw_nogpu();}

int cv::gpu::CascadeClassifier_GPU_LBP::detectMultiScale(const cv::gpu::GpuMat& /*image*/, cv::gpu::GpuMat& /*objectsBuf*/,
    double /*scaleFactor*/, int /*minNeighbors*/, cv::Size /*maxObjectSize*/){ throw_nogpu(); return 0;}
|
2012-06-22 23:00:36 +08:00
|
|
|
|
2010-12-17 23:41:26 +08:00
|
|
|
#else
|
|
|
|
|
2012-07-13 23:47:09 +08:00
|
|
|
// Constructing with a non-empty frame size pre-allocates all working
// buffers so the first detectMultiScale call does not pay the allocation
// cost; allocateBuffers() is a no-op for cv::Size().
cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifier_GPU_LBP(cv::Size detectionFrameSize) { allocateBuffers(detectionFrameSize); }

// Nothing to do: GpuMat members release their device memory automatically.
cv::gpu::CascadeClassifier_GPU_LBP::~CascadeClassifier_GPU_LBP(){}
|
2012-07-05 00:03:48 +08:00
|
|
|
|
2012-07-13 23:47:09 +08:00
|
|
|
// (Re)allocates the per-frame working buffers used by detectMultiScale:
// the resize scratch image, the integral image, the NPP integral scratch
// buffer, and the candidate-rectangle list. Buffers are only grown, never
// shrunk, so repeated calls with the same or smaller frames reuse memory.
// NOTE(review): "resuzeBuffer" is a pre-existing member-name typo (resize);
// it is declared in the class header, so it is left untouched here.
void cv::gpu::CascadeClassifier_GPU_LBP::allocateBuffers(cv::Size frame)
{
    // An empty size means "allocate lazily on first detect call".
    if (frame == cv::Size())
        return;

    // Grow only when the requested frame exceeds current capacity.
    if (resuzeBuffer.empty() || frame.width > resuzeBuffer.cols || frame.height > resuzeBuffer.rows)
    {
        resuzeBuffer.create(frame, CV_8UC1);

        // Integral image of a WxH frame is (H+1) rows x (W+1) cols.
        integral.create(frame.height + 1, frame.width + 1, CV_32SC1);
        NcvSize32u roiSize;
        roiSize.width = frame.width;
        roiSize.height = frame.height;

        cudaDeviceProp prop;
        cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

        // Scratch size required by the NPP staging integral computation.
        Ncv32u bufSize;
        ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
        integralBuffer.create(1, bufSize, CV_8UC1);

        // Candidate buffer capacity heuristic: half the frame width.
        candidates.create(1 , frame.width >> 1, CV_32SC4);
    }
}
|
2012-06-22 23:00:36 +08:00
|
|
|
|
2012-07-13 23:47:09 +08:00
|
|
|
|
2012-06-22 23:00:36 +08:00
|
|
|
|
2012-06-26 00:39:45 +08:00
|
|
|
// Pre-sizes the integral-image buffer for frames of the given size.
// GpuMat::create takes (rows, cols): the integral of a WxH frame is
// (H+1) rows by (W+1) cols, exactly as allocateBuffers() sizes it.
// The previous code passed width as the row count and height as the
// column count, transposing the buffer for non-square frames.
void cv::gpu::CascadeClassifier_GPU_LBP::preallocateIntegralBuffer(cv::Size desired)
{
    integral.create(desired.height + 1, desired.width + 1, CV_32SC1);
}
|
|
|
|
|
2012-06-26 00:39:34 +08:00
|
|
|
// The classifier is considered empty until read() has uploaded the stage
// data to the device (stage_mat is the first matrix filled by read()).
bool cv::gpu::CascadeClassifier_GPU_LBP::empty() const
{
    return stage_mat.empty();
}
|
2012-06-22 23:00:36 +08:00
|
|
|
|
|
|
|
bool cv::gpu::CascadeClassifier_GPU_LBP::load(const string& classifierAsXml)
|
|
|
|
{
|
|
|
|
FileStorage fs(classifierAsXml, FileStorage::READ);
|
|
|
|
if (!fs.isOpened())
|
|
|
|
return false;
|
2012-06-26 00:39:34 +08:00
|
|
|
return read(fs.getFirstTopLevelNode());
|
2012-06-22 23:00:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// XML node names (and one numeric constant) used by read() below to parse
// a cascade stored by OpenCV's FileStorage; #undef'ed again after read().
#define GPU_CC_STAGE_TYPE       "stageType"
#define GPU_CC_FEATURE_TYPE     "featureType"
#define GPU_CC_BOOST            "BOOST"
#define GPU_CC_LBP              "LBP"
#define GPU_CC_MAX_CAT_COUNT    "maxCatCount"
#define GPU_CC_HEIGHT           "height"
#define GPU_CC_WIDTH            "width"
#define GPU_CC_STAGE_PARAMS     "stageParams"
#define GPU_CC_MAX_DEPTH        "maxDepth"
#define GPU_CC_FEATURE_PARAMS   "featureParams"
#define GPU_CC_STAGES           "stages"
#define GPU_CC_STAGE_THRESHOLD  "stageThreshold"
// Bias subtracted from each stage threshold to absorb float rounding.
#define GPU_THRESHOLD_EPS       1e-5f
#define GPU_CC_WEAK_CLASSIFIERS "weakClassifiers"
#define GPU_CC_INTERNAL_NODES   "internalNodes"
#define GPU_CC_LEAF_VALUES      "leafValues"
#define GPU_CC_FEATURES         "features"
#define GPU_CC_RECT             "rect"
|
2012-06-22 23:00:36 +08:00
|
|
|
|
2012-07-02 16:08:11 +08:00
|
|
|
// Per-stage descriptor of the boosted cascade. read() uploads a
// vector<Stage> to the GPU by reinterpreting it as raw bytes, so the
// layout must stay exactly {int, int, float}.
struct Stage
{
    int first;       // index of this stage's first weak tree in the trees array
    int ntrees;      // number of weak trees belonging to this stage
    float threshold; // stage sum threshold (already biased by GPU_THRESHOLD_EPS)
};
|
|
|
|
|
2012-07-02 16:07:46 +08:00
|
|
|
// currently only stump based boost classifiers are supported
//
// Parses a BOOST/LBP cascade from a FileStorage node, flattens the tree
// data into contiguous host vectors, and uploads them to the device
// matrices (stage_mat, trees_mat, nodes_mat, leaves_mat, subsets_mat,
// features_mat). Returns false on any missing/malformed node; asserts on
// wrong cascade type. Also sets NxM (training window), ncategories,
// subsetSize and nodeStep members.
bool CascadeClassifier_GPU_LBP::read(const FileNode &root)
{
    std::string stageTypeStr = (string)root[GPU_CC_STAGE_TYPE];
    CV_Assert(stageTypeStr == GPU_CC_BOOST);

    string featureTypeStr = (string)root[GPU_CC_FEATURE_TYPE];
    CV_Assert(featureTypeStr == GPU_CC_LBP);

    NxM.width = (int)root[GPU_CC_WIDTH];
    NxM.height = (int)root[GPU_CC_HEIGHT];
    CV_Assert( NxM.height > 0 && NxM.width > 0 );

    // Only depth-1 trees (stumps) are supported by the GPU kernels.
    isStumps = ((int)(root[GPU_CC_STAGE_PARAMS][GPU_CC_MAX_DEPTH]) == 1) ? true : false;
    CV_Assert(isStumps);

    FileNode fn = root[GPU_CC_FEATURE_PARAMS];
    if (fn.empty())
        return false;

    ncategories = fn[GPU_CC_MAX_CAT_COUNT];

    // Each categorical split stores a bitset of ceil(ncategories/32) ints;
    // a node record is 3 ints plus either that bitset or one threshold.
    subsetSize = (ncategories + 31) / 32;
    nodeStep = 3 + ( ncategories > 0 ? subsetSize : 1 );

    fn = root[GPU_CC_STAGES];
    if (fn.empty())
        return false;

    std::vector<Stage> stages;
    stages.reserve(fn.size());

    // Flattened cascade data accumulated across all stages.
    std::vector<int> cl_trees;
    std::vector<int> cl_nodes;
    std::vector<float> cl_leaves;
    std::vector<int> subsets;

    FileNodeIterator it = fn.begin(), it_end = fn.end();
    for (size_t si = 0; it != it_end; si++, ++it )
    {
        FileNode fns = *it;
        Stage st;
        st.threshold = (float)fns[GPU_CC_STAGE_THRESHOLD] - GPU_THRESHOLD_EPS;

        fns = fns[GPU_CC_WEAK_CLASSIFIERS];
        if (fns.empty())
            return false;

        st.ntrees = (int)fns.size();
        st.first = (int)cl_trees.size();

        stages.push_back(st);// (int, int, float)

        cl_trees.reserve(stages[si].first + stages[si].ntrees);

        // weak trees
        FileNodeIterator it1 = fns.begin(), it1_end = fns.end();
        for ( ; it1 != it1_end; ++it1 )
        {
            FileNode fnw = *it1;

            FileNode internalNodes = fnw[GPU_CC_INTERNAL_NODES];
            FileNode leafValues = fnw[GPU_CC_LEAF_VALUES];
            if ( internalNodes.empty() || leafValues.empty() )
                return false;

            // Number of split nodes in this tree (records of nodeStep ints).
            int nodeCount = (int)internalNodes.size()/nodeStep;
            cl_trees.push_back(nodeCount);

            cl_nodes.reserve((cl_nodes.size() + nodeCount) * 3);
            cl_leaves.reserve(cl_leaves.size() + leafValues.size());

            if( subsetSize > 0 )
                subsets.reserve(subsets.size() + nodeCount * subsetSize);

            // nodes
            FileNodeIterator iIt = internalNodes.begin(), iEnd = internalNodes.end();

            for( ; iIt != iEnd; )
            {
                // First three ints of a record go to cl_nodes, the
                // remaining subsetSize ints (categorical bitset) to subsets.
                cl_nodes.push_back((int)*(iIt++));
                cl_nodes.push_back((int)*(iIt++));
                cl_nodes.push_back((int)*(iIt++));

                if( subsetSize > 0 )
                    for( int j = 0; j < subsetSize; j++, ++iIt )
                        subsets.push_back((int)*iIt);
            }

            // leaves
            iIt = leafValues.begin(), iEnd = leafValues.end();
            for( ; iIt != iEnd; ++iIt )
                cl_leaves.push_back((float)*iIt);
        }
    }

    fn = root[GPU_CC_FEATURES];
    if( fn.empty() )
        return false;
    // Each LBP feature is a 4-byte rect (x, y, block width, block height);
    // coordinates are saturated into uchar range.
    std::vector<uchar> features;
    features.reserve(fn.size() * 4);
    FileNodeIterator f_it = fn.begin(), f_end = fn.end();
    for (; f_it != f_end; ++f_it)
    {
        FileNode rect = (*f_it)[GPU_CC_RECT];
        FileNodeIterator r_it = rect.begin();
        features.push_back(saturate_cast<uchar>((int)*(r_it++)));
        features.push_back(saturate_cast<uchar>((int)*(r_it++)));
        features.push_back(saturate_cast<uchar>((int)*(r_it++)));
        features.push_back(saturate_cast<uchar>((int)*(r_it++)));
    }

    // copy data structures on gpu
    // stages are uploaded as raw bytes, hence the dependency on the exact
    // {int, int, float} layout of struct Stage above.
    stage_mat.upload(cv::Mat(1, stages.size() * sizeof(Stage), CV_8UC1, (uchar*)&(stages[0]) ));
    trees_mat.upload(cv::Mat(cl_trees).reshape(1,1));
    nodes_mat.upload(cv::Mat(cl_nodes).reshape(1,1));
    leaves_mat.upload(cv::Mat(cl_leaves).reshape(1,1));
    subsets_mat.upload(cv::Mat(subsets).reshape(1,1));
    features_mat.upload(cv::Mat(features).reshape(4,1));

    return true;
}
|
|
|
|
|
|
|
|
#undef GPU_CC_STAGE_TYPE
|
|
|
|
#undef GPU_CC_BOOST
|
|
|
|
#undef GPU_CC_FEATURE_TYPE
|
|
|
|
#undef GPU_CC_LBP
|
|
|
|
#undef GPU_CC_MAX_CAT_COUNT
|
|
|
|
#undef GPU_CC_HEIGHT
|
|
|
|
#undef GPU_CC_WIDTH
|
|
|
|
#undef GPU_CC_STAGE_PARAMS
|
|
|
|
#undef GPU_CC_MAX_DEPTH
|
|
|
|
#undef GPU_CC_FEATURE_PARAMS
|
|
|
|
#undef GPU_CC_STAGES
|
|
|
|
#undef GPU_CC_STAGE_THRESHOLD
|
|
|
|
#undef GPU_THRESHOLD_EPS
|
|
|
|
#undef GPU_CC_WEAK_CLASSIFIERS
|
|
|
|
#undef GPU_CC_INTERNAL_NODES
|
|
|
|
#undef GPU_CC_LEAF_VALUES
|
2012-06-26 20:44:27 +08:00
|
|
|
#undef GPU_CC_FEATURES
|
|
|
|
#undef GPU_CC_RECT
|
2012-06-22 23:00:36 +08:00
|
|
|
|
2012-06-26 00:39:34 +08:00
|
|
|
// Returns the cascade's training window size as stored into NxM by read().
Size cv::gpu::CascadeClassifier_GPU_LBP::getClassifierSize() const
{
    return NxM;
}
|
2012-06-22 23:00:36 +08:00
|
|
|
|
2012-06-26 00:39:40 +08:00
|
|
|
// Forward declarations of the host-callable wrappers around the CUDA
// kernels implemented in the module's .cu files.
namespace cv { namespace gpu { namespace device
{
    namespace lbp
    {
        // Evaluates the stump-based LBP cascade over the scan grid and
        // records passing windows in 'objects', bumping *classified.
        void classifyStumpFixed(const DevMem2Di& integral,
                                const int integralPitch,
                                const DevMem2Db& mstages,
                                const int nstages,
                                const DevMem2Di& mnodes,
                                const DevMem2Df& mleaves,
                                const DevMem2Di& msubsets,
                                const DevMem2Db& mfeatures,
                                const int workWidth,
                                const int workHeight,
                                const int clWidth,
                                const int clHeight,
                                float scale,
                                int step,
                                int subsetSize,
                                DevMem2D_<int4> objects,
                                unsigned int* classified);

        // Groups overlapping candidate rectangles (a la groupRectangles).
        // NOTE(review): "Conmonents" is a pre-existing typo in the symbol
        // name; the definition lives in a .cu file, so it stays as-is.
        int connectedConmonents(DevMem2D_<int4> candidates, int ncandidates, DevMem2D_<int4> objects,int groupThreshold, float grouping_eps, unsigned int* nclasses);

        // Bind/unbind the integral image to a texture (currently unused
        // by detectMultiScale below — calls are commented out there).
        void bindIntegral(DevMem2Di integral);
        void unbindIntegral();
    }
}}}
|
|
|
|
|
2012-07-10 19:58:15 +08:00
|
|
|
int cv::gpu::CascadeClassifier_GPU_LBP::detectMultiScale(const GpuMat& image, GpuMat& objects,
|
2012-07-04 12:51:09 +08:00
|
|
|
double scaleFactor, int groupThreshold, cv::Size maxObjectSize /*, Size minSize=Size()*/)
|
2012-06-26 00:39:40 +08:00
|
|
|
{
|
|
|
|
CV_Assert( scaleFactor > 1 && image.depth() == CV_8U );
|
2012-06-26 20:15:19 +08:00
|
|
|
CV_Assert(!empty());
|
2012-06-26 00:39:40 +08:00
|
|
|
|
|
|
|
const int defaultObjSearchNum = 100;
|
2012-07-04 12:51:09 +08:00
|
|
|
const float grouping_eps = 0.2;
|
2012-06-26 00:39:40 +08:00
|
|
|
|
2012-07-02 16:08:17 +08:00
|
|
|
if( !objects.empty() && objects.depth() == CV_32S)
|
|
|
|
objects.reshape(4, 1);
|
|
|
|
else
|
2012-07-10 19:58:15 +08:00
|
|
|
objects.create(1 , image.cols >> 4, CV_32SC4);
|
2012-07-13 23:47:09 +08:00
|
|
|
|
|
|
|
candidates.create(1 , image.cols >> 1, CV_32SC4);
|
2012-07-10 19:58:15 +08:00
|
|
|
// GpuMat candidates(1 , defaultObjSearchNum, CV_32SC4);
|
|
|
|
// used for debug
|
2012-07-11 20:22:28 +08:00
|
|
|
// candidates.setTo(cv::Scalar::all(0));
|
|
|
|
// objects.setTo(cv::Scalar::all(0));
|
2012-07-02 16:08:11 +08:00
|
|
|
if (maxObjectSize == cv::Size())
|
|
|
|
maxObjectSize = image.size();
|
|
|
|
|
2012-07-13 23:47:09 +08:00
|
|
|
allocateBuffers(image.size());
|
|
|
|
|
|
|
|
unsigned int classified = 0;
|
2012-07-04 12:51:00 +08:00
|
|
|
unsigned int* dclassified;
|
|
|
|
cudaMalloc(&dclassified, sizeof(int));
|
2012-07-13 23:47:09 +08:00
|
|
|
cudaMemcpy(dclassified, &classified, sizeof(int), cudaMemcpyHostToDevice);
|
2012-07-11 20:22:22 +08:00
|
|
|
int step = 2;
|
2012-07-12 16:50:36 +08:00
|
|
|
// cv::gpu::device::lbp::bindIntegral(integral);
|
2012-06-26 00:39:40 +08:00
|
|
|
|
2012-07-11 20:22:22 +08:00
|
|
|
cv::Size scaledImageSize(image.cols, image.rows);
|
|
|
|
cv::Size processingRectSize( scaledImageSize.width - NxM.width + 1, scaledImageSize.height - NxM.height + 1 );
|
|
|
|
cv::Size windowSize(NxM.width, NxM.height);
|
|
|
|
|
|
|
|
double factor = 1;
|
|
|
|
|
2012-07-12 15:11:26 +08:00
|
|
|
for (; ;)
|
2012-06-26 00:39:40 +08:00
|
|
|
{
|
2012-07-11 20:22:22 +08:00
|
|
|
if (processingRectSize.width <= 0 || processingRectSize.height <= 0 )
|
|
|
|
break;
|
|
|
|
|
|
|
|
if( windowSize.width > maxObjectSize.width || windowSize.height > maxObjectSize.height )
|
|
|
|
break;
|
|
|
|
|
|
|
|
// if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
|
|
|
|
// continue;
|
|
|
|
|
2012-07-13 23:47:09 +08:00
|
|
|
GpuMat scaledImg = resuzeBuffer(cv::Rect(0, 0, scaledImageSize.width, scaledImageSize.height));
|
|
|
|
GpuMat scaledIntegral = integral(cv::Rect(0, 0, scaledImageSize.width + 1, scaledImageSize.height + 1));
|
2012-07-11 20:22:22 +08:00
|
|
|
GpuMat currBuff = integralBuffer;
|
|
|
|
|
|
|
|
cv::gpu::resize(image, scaledImg, scaledImageSize, 0, 0, CV_INTER_LINEAR);
|
|
|
|
cv::gpu::integralBuffered(scaledImg, scaledIntegral, currBuff);
|
|
|
|
|
|
|
|
step = (factor <= 2.) + 1;
|
|
|
|
|
2012-07-12 16:50:36 +08:00
|
|
|
cv::gpu::device::lbp::classifyStumpFixed(integral, integral.step1(), stage_mat, stage_mat.cols / sizeof(Stage), nodes_mat, leaves_mat, subsets_mat, features_mat,
|
2012-07-10 19:58:15 +08:00
|
|
|
processingRectSize.width, processingRectSize.height, windowSize.width, windowSize.height, factor, step, subsetSize, candidates, dclassified);
|
2012-07-11 20:22:22 +08:00
|
|
|
|
|
|
|
factor *= scaleFactor;
|
|
|
|
windowSize = cv::Size(cvRound(NxM.width * factor), cvRound(NxM.height * factor));
|
|
|
|
scaledImageSize = cv::Size(cvRound( image.cols / factor ), cvRound( image.rows / factor ));
|
|
|
|
processingRectSize = cv::Size(scaledImageSize.width - NxM.width + 1, scaledImageSize.height - NxM.height + 1 );
|
2012-06-26 00:39:40 +08:00
|
|
|
}
|
2012-07-10 19:58:15 +08:00
|
|
|
|
2012-07-12 16:50:36 +08:00
|
|
|
// cv::gpu::device::lbp::unbindIntegral();
|
2012-07-04 12:51:09 +08:00
|
|
|
if (groupThreshold <= 0 || objects.empty())
|
|
|
|
return 0;
|
2012-07-13 23:47:09 +08:00
|
|
|
cudaMemcpy(&classified, dclassified, sizeof(int), cudaMemcpyDeviceToHost);
|
|
|
|
cv::gpu::device::lbp::connectedConmonents(candidates, classified, objects, groupThreshold, grouping_eps, dclassified);
|
|
|
|
cudaMemcpy(&classified, dclassified, sizeof(int), cudaMemcpyDeviceToHost);
|
2012-07-04 20:11:07 +08:00
|
|
|
cudaSafeCall( cudaDeviceSynchronize() );
|
2012-07-13 23:47:09 +08:00
|
|
|
|
|
|
|
step = classified;
|
|
|
|
|
2012-07-04 20:11:16 +08:00
|
|
|
cudaFree(dclassified);
|
|
|
|
return step;
|
2012-06-26 00:39:34 +08:00
|
|
|
}
|
2012-06-22 23:00:36 +08:00
|
|
|
|
|
|
|
// ============ old fashioned haar cascade ==============================================//
|
2011-01-13 21:04:00 +08:00
|
|
|
struct cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
CascadeClassifierImpl(const string& filename) : lastAllocatedFrameSize(-1, -1)
|
|
|
|
{
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvSetDebugOutputHandler(NCVDebugOutputHandler);
|
2011-10-19 17:53:22 +08:00
|
|
|
ncvSafeCall( load(filename) );
|
2011-04-07 20:59:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
NCVStatus process(const GpuMat& src, GpuMat& objects, float scaleStep, int minNeighbors,
|
|
|
|
bool findLargestObject, bool visualizeInPlace, NcvSize32u ncvMinSize,
|
|
|
|
/*out*/unsigned int& numDetections)
|
|
|
|
{
|
|
|
|
calculateMemReqsAndAllocate(src.size());
|
2010-12-17 23:41:26 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
NCVMemPtr src_beg;
|
|
|
|
src_beg.ptr = (void*)src.ptr<Ncv8u>();
|
|
|
|
src_beg.memtype = NCVMemoryTypeDevice;
|
2010-12-17 23:41:26 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
NCVMemSegment src_seg;
|
|
|
|
src_seg.begin = src_beg;
|
|
|
|
src_seg.size = src.step * src.rows;
|
2010-12-17 23:41:26 +08:00
|
|
|
|
2011-08-08 19:28:14 +08:00
|
|
|
NCVMatrixReuse<Ncv8u> d_src(src_seg, static_cast<int>(devProp.textureAlignment), src.cols, src.rows, static_cast<int>(src.step), true);
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvAssertReturn(d_src.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
|
2010-12-17 23:41:26 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
CV_Assert(objects.rows == 1);
|
|
|
|
|
|
|
|
NCVMemPtr objects_beg;
|
|
|
|
objects_beg.ptr = (void*)objects.ptr<NcvRect32u>();
|
|
|
|
objects_beg.memtype = NCVMemoryTypeDevice;
|
|
|
|
|
|
|
|
NCVMemSegment objects_seg;
|
|
|
|
objects_seg.begin = objects_beg;
|
|
|
|
objects_seg.size = objects.step * objects.rows;
|
|
|
|
NCVVectorReuse<NcvRect32u> d_rects(objects_seg, objects.cols);
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvAssertReturn(d_rects.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
NcvSize32u roi;
|
|
|
|
roi.width = d_src.width();
|
|
|
|
roi.height = d_src.height();
|
|
|
|
|
|
|
|
Ncv32u flags = 0;
|
|
|
|
flags |= findLargestObject? NCVPipeObjDet_FindLargestObject : 0;
|
|
|
|
flags |= visualizeInPlace ? NCVPipeObjDet_VisualizeInPlace : 0;
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvStat = ncvDetectObjectsMultiScale_device(
|
|
|
|
d_src, roi, d_rects, numDetections, haar, *h_haarStages,
|
|
|
|
*d_haarStages, *d_haarNodes, *d_haarFeatures,
|
|
|
|
ncvMinSize,
|
|
|
|
minNeighbors,
|
|
|
|
scaleStep, 1,
|
|
|
|
flags,
|
2011-02-04 23:15:25 +08:00
|
|
|
*gpuAllocator, *cpuAllocator, devProp, 0);
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvAssertReturnNcvStat(ncvStat);
|
|
|
|
ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
return NCV_SUCCESS;
|
|
|
|
}
|
2011-04-07 20:59:01 +08:00
|
|
|
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
NcvSize32u getClassifierSize() const { return haar.ClassifierSize; }
|
|
|
|
cv::Size getClassifierCvSize() const { return cv::Size(haar.ClassifierSize.width, haar.ClassifierSize.height); }
|
2011-04-07 20:59:01 +08:00
|
|
|
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
private:
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-10-27 18:13:28 +08:00
|
|
|
static void NCVDebugOutputHandler(const std::string &msg) { CV_Error(CV_GpuApiCallError, msg.c_str()); }
|
2011-01-13 21:04:00 +08:00
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
NCVStatus load(const string& classifierFile)
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
|
|
|
int devId = cv::gpu::getDevice();
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), NCV_CUDA_ERROR);
|
|
|
|
|
|
|
|
// Load the classifier from file (assuming its size is about 1 mb) using a simple allocator
|
2011-08-08 19:28:14 +08:00
|
|
|
gpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeDevice, static_cast<int>(devProp.textureAlignment));
|
|
|
|
cpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, static_cast<int>(devProp.textureAlignment));
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
ncvAssertPrintReturn(gpuCascadeAllocator->isInitialized(), "Error creating cascade GPU allocator", NCV_CUDA_ERROR);
|
|
|
|
ncvAssertPrintReturn(cpuCascadeAllocator->isInitialized(), "Error creating cascade CPU allocator", NCV_CUDA_ERROR);
|
|
|
|
|
|
|
|
Ncv32u haarNumStages, haarNumNodes, haarNumFeatures;
|
|
|
|
ncvStat = ncvHaarGetClassifierSize(classifierFile, haarNumStages, haarNumNodes, haarNumFeatures);
|
|
|
|
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error reading classifier size (check the file)", NCV_FILE_ERROR);
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
h_haarStages = new NCVVectorAlloc<HaarStage64>(*cpuCascadeAllocator, haarNumStages);
|
2011-01-13 21:04:00 +08:00
|
|
|
h_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*cpuCascadeAllocator, haarNumNodes);
|
|
|
|
h_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*cpuCascadeAllocator, haarNumFeatures);
|
|
|
|
|
|
|
|
ncvAssertPrintReturn(h_haarStages->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvAssertPrintReturn(h_haarNodes->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvAssertPrintReturn(h_haarFeatures->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
|
|
|
|
|
|
|
|
ncvStat = ncvHaarLoadFromFile_host(classifierFile, haar, *h_haarStages, *h_haarNodes, *h_haarFeatures);
|
|
|
|
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error loading classifier", NCV_FILE_ERROR);
|
|
|
|
|
|
|
|
d_haarStages = new NCVVectorAlloc<HaarStage64>(*gpuCascadeAllocator, haarNumStages);
|
|
|
|
d_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*gpuCascadeAllocator, haarNumNodes);
|
|
|
|
d_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*gpuCascadeAllocator, haarNumFeatures);
|
|
|
|
|
|
|
|
ncvAssertPrintReturn(d_haarStages->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvAssertPrintReturn(d_haarNodes->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvAssertPrintReturn(d_haarFeatures->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
|
|
|
|
|
|
|
|
ncvStat = h_haarStages->copySolid(*d_haarStages, 0);
|
|
|
|
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
|
|
|
|
ncvStat = h_haarNodes->copySolid(*d_haarNodes, 0);
|
|
|
|
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
|
|
|
|
ncvStat = h_haarFeatures->copySolid(*d_haarFeatures, 0);
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
return NCV_SUCCESS;
|
|
|
|
}
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
NCVStatus calculateMemReqsAndAllocate(const Size& frameSize)
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
if (lastAllocatedFrameSize == frameSize)
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
return NCV_SUCCESS;
|
2011-04-07 20:59:01 +08:00
|
|
|
}
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
// Calculate memory requirements and create real allocators
|
2011-08-08 19:28:14 +08:00
|
|
|
NCVMemStackAllocator gpuCounter(static_cast<int>(devProp.textureAlignment));
|
|
|
|
NCVMemStackAllocator cpuCounter(static_cast<int>(devProp.textureAlignment));
|
2011-01-13 21:04:00 +08:00
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvAssertPrintReturn(gpuCounter.isInitialized(), "Error creating GPU memory counter", NCV_CUDA_ERROR);
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvAssertPrintReturn(cpuCounter.isInitialized(), "Error creating CPU memory counter", NCV_CUDA_ERROR);
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
NCVMatrixAlloc<Ncv8u> d_src(gpuCounter, frameSize.width, frameSize.height);
|
|
|
|
NCVMatrixAlloc<Ncv8u> h_src(cpuCounter, frameSize.width, frameSize.height);
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvAssertReturn(d_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvAssertReturn(h_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
NCVVectorAlloc<NcvRect32u> d_rects(gpuCounter, 100);
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvAssertReturn(d_rects.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
|
|
|
|
|
|
|
|
NcvSize32u roi;
|
|
|
|
roi.width = d_src.width();
|
|
|
|
roi.height = d_src.height();
|
|
|
|
Ncv32u numDetections;
|
|
|
|
ncvStat = ncvDetectObjectsMultiScale_device(d_src, roi, d_rects, numDetections, haar, *h_haarStages,
|
2011-02-04 23:15:25 +08:00
|
|
|
*d_haarStages, *d_haarNodes, *d_haarFeatures, haar.ClassifierSize, 4, 1.2f, 1, 0, gpuCounter, cpuCounter, devProp, 0);
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
ncvAssertReturnNcvStat(ncvStat);
|
|
|
|
ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-08-08 19:28:14 +08:00
|
|
|
gpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
|
|
|
|
cpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
ncvAssertPrintReturn(gpuAllocator->isInitialized(), "Error creating GPU memory allocator", NCV_CUDA_ERROR);
|
2011-04-07 20:59:01 +08:00
|
|
|
ncvAssertPrintReturn(cpuAllocator->isInitialized(), "Error creating CPU memory allocator", NCV_CUDA_ERROR);
|
2011-01-13 21:04:00 +08:00
|
|
|
return NCV_SUCCESS;
|
|
|
|
}
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
cudaDeviceProp devProp;
|
|
|
|
NCVStatus ncvStat;
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
Ptr<NCVMemNativeAllocator> gpuCascadeAllocator;
|
2011-01-13 21:04:00 +08:00
|
|
|
Ptr<NCVMemNativeAllocator> cpuCascadeAllocator;
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
Ptr<NCVVectorAlloc<HaarStage64> > h_haarStages;
|
2011-01-13 21:04:00 +08:00
|
|
|
Ptr<NCVVectorAlloc<HaarClassifierNode128> > h_haarNodes;
|
|
|
|
Ptr<NCVVectorAlloc<HaarFeature64> > h_haarFeatures;
|
|
|
|
|
|
|
|
HaarClassifierCascadeDescriptor haar;
|
|
|
|
|
|
|
|
Ptr<NCVVectorAlloc<HaarStage64> > d_haarStages;
|
|
|
|
Ptr<NCVVectorAlloc<HaarClassifierNode128> > d_haarNodes;
|
|
|
|
Ptr<NCVVectorAlloc<HaarFeature64> > d_haarFeatures;
|
|
|
|
|
|
|
|
Size lastAllocatedFrameSize;
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
Ptr<NCVMemStackAllocator> gpuAllocator;
|
2011-01-13 21:04:00 +08:00
|
|
|
Ptr<NCVMemStackAllocator> cpuAllocator;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU() : findLargestObject(false), visualizeInPlace(false), impl(0) {}
|
|
|
|
cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const string& filename) : findLargestObject(false), visualizeInPlace(false), impl(0) { load(filename); }
|
|
|
|
cv::gpu::CascadeClassifier_GPU::~CascadeClassifier_GPU() { release(); }
|
|
|
|
bool cv::gpu::CascadeClassifier_GPU::empty() const { return impl == 0; }
|
|
|
|
// Frees the implementation and returns the classifier to the empty state.
// Safe to call repeatedly; a no-op when already empty.
void cv::gpu::CascadeClassifier_GPU::release()
{
    if (impl)
    {
        delete impl;
        impl = 0;
    }
}
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
bool cv::gpu::CascadeClassifier_GPU::load(const string& filename)
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
release();
|
|
|
|
impl = new CascadeClassifierImpl(filename);
|
2011-04-07 20:59:01 +08:00
|
|
|
return !this->empty();
|
2010-12-17 23:41:26 +08:00
|
|
|
}
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
// Returns the training window size of the loaded cascade, or a default
// (zero) Size when no cascade is loaded.
Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const
{
    if (this->empty())
        return Size();
    return impl->getClassifierCvSize();
}
|
2011-04-07 20:59:01 +08:00
|
|
|
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
CV_Assert( scaleFactor > 1 && image.depth() == CV_8U);
|
|
|
|
CV_Assert( !this->empty());
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
const int defaultObjSearchNum = 100;
|
|
|
|
if (objectsBuf.empty())
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
objectsBuf.create(1, defaultObjSearchNum, DataType<Rect>::type);
|
2011-04-07 20:59:01 +08:00
|
|
|
}
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
NcvSize32u ncvMinSize = impl->getClassifierSize();
|
2010-12-17 23:41:26 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
if (ncvMinSize.width < (unsigned)minSize.width && ncvMinSize.height < (unsigned)minSize.height)
|
|
|
|
{
|
|
|
|
ncvMinSize.width = minSize.width;
|
|
|
|
ncvMinSize.height = minSize.height;
|
2011-04-07 20:59:01 +08:00
|
|
|
}
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
unsigned int numDetections;
|
2011-10-19 17:53:22 +08:00
|
|
|
ncvSafeCall( impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, ncvMinSize, numDetections) );
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
return numDetections;
|
2010-12-17 23:41:26 +08:00
|
|
|
}
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-21 17:00:19 +08:00
|
|
|
struct RectConvert
|
|
|
|
{
|
2011-04-07 20:59:01 +08:00
|
|
|
Rect operator()(const NcvRect32u& nr) const { return Rect(nr.x, nr.y, nr.width, nr.height); }
|
|
|
|
NcvRect32u operator()(const Rect& nr) const
|
|
|
|
{
|
|
|
|
NcvRect32u rect;
|
|
|
|
rect.x = nr.x;
|
|
|
|
rect.y = nr.y;
|
|
|
|
rect.width = nr.width;
|
|
|
|
rect.height = nr.height;
|
|
|
|
return rect;
|
|
|
|
}
|
2011-01-21 17:00:19 +08:00
|
|
|
};
|
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-21 17:00:19 +08:00
|
|
|
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights)
|
|
|
|
{
|
2011-04-07 20:59:01 +08:00
|
|
|
vector<Rect> rects(hypotheses.size());
|
|
|
|
std::transform(hypotheses.begin(), hypotheses.end(), rects.begin(), RectConvert());
|
|
|
|
|
|
|
|
if (weights)
|
|
|
|
{
|
|
|
|
vector<int> weights_int;
|
|
|
|
weights_int.assign(weights->begin(), weights->end());
|
|
|
|
cv::groupRectangles(rects, weights_int, groupThreshold, eps);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
cv::groupRectangles(rects, groupThreshold, eps);
|
|
|
|
}
|
|
|
|
std::transform(rects.begin(), rects.end(), hypotheses.begin(), RectConvert());
|
|
|
|
hypotheses.resize(rects.size());
|
2011-01-21 17:00:19 +08:00
|
|
|
}
|
2011-01-13 21:04:00 +08:00
|
|
|
|
2011-04-07 20:59:01 +08:00
|
|
|
NCVStatus loadFromXML(const std::string &filename,
|
|
|
|
HaarClassifierCascadeDescriptor &haar,
|
|
|
|
std::vector<HaarStage64> &haarStages,
|
|
|
|
std::vector<HaarClassifierNode128> &haarClassifierNodes,
|
2011-01-13 21:04:00 +08:00
|
|
|
std::vector<HaarFeature64> &haarFeatures)
|
2010-12-17 23:41:26 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
NCVStatus ncvStat;
|
|
|
|
|
|
|
|
haar.NumStages = 0;
|
|
|
|
haar.NumClassifierRootNodes = 0;
|
|
|
|
haar.NumClassifierTotalNodes = 0;
|
|
|
|
haar.NumFeatures = 0;
|
|
|
|
haar.ClassifierSize.width = 0;
|
2011-04-04 19:47:21 +08:00
|
|
|
haar.ClassifierSize.height = 0;
|
2011-01-13 21:04:00 +08:00
|
|
|
haar.bHasStumpsOnly = true;
|
|
|
|
haar.bNeedsTiltedII = false;
|
|
|
|
Ncv32u curMaxTreeDepth;
|
|
|
|
|
2011-04-04 19:47:21 +08:00
|
|
|
std::vector<char> xmlFileCont;
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
|
|
|
|
haarStages.resize(0);
|
|
|
|
haarClassifierNodes.resize(0);
|
2011-04-04 19:47:21 +08:00
|
|
|
haarFeatures.resize(0);
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
Ptr<CvHaarClassifierCascade> oldCascade = (CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0);
|
|
|
|
if (oldCascade.empty())
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
return NCV_HAAR_XML_LOADING_EXCEPTION;
|
2011-04-07 20:59:01 +08:00
|
|
|
}
|
2011-04-04 19:47:21 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
haar.ClassifierSize.width = oldCascade->orig_window_size.width;
|
|
|
|
haar.ClassifierSize.height = oldCascade->orig_window_size.height;
|
|
|
|
|
|
|
|
int stagesCound = oldCascade->count;
|
|
|
|
for(int s = 0; s < stagesCound; ++s) // by stages
|
|
|
|
{
|
|
|
|
HaarStage64 curStage;
|
2011-08-08 19:28:14 +08:00
|
|
|
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);
|
|
|
|
|
|
|
|
int treesCount = oldCascade->stage_classifier[s].count;
|
2011-04-04 19:47:21 +08:00
|
|
|
for(int t = 0; t < treesCount; ++t) // by trees
|
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
Ncv32u nodeId = 0;
|
|
|
|
CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];
|
|
|
|
|
|
|
|
int nodesCount = tree->count;
|
2011-04-04 19:47:21 +08:00
|
|
|
for(int n = 0; n < nodesCount; ++n) //by features
|
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
CvHaarFeature* feature = &tree->haar_feature[n];
|
|
|
|
|
2011-04-04 19:47:21 +08:00
|
|
|
HaarClassifierNode128 curNode;
|
2011-01-13 21:04:00 +08:00
|
|
|
curNode.setThreshold(tree->threshold[n]);
|
2011-04-04 19:47:21 +08:00
|
|
|
|
|
|
|
NcvBool bIsLeftNodeLeaf = false;
|
|
|
|
NcvBool bIsRightNodeLeaf = false;
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
HaarClassifierNodeDescriptor32 nodeLeft;
|
|
|
|
if ( tree->left[n] <= 0 )
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-01-13 21:04:00 +08:00
|
|
|
Ncv32f leftVal = tree->alpha[-tree->left[n]];
|
|
|
|
ncvStat = nodeLeft.create(leftVal);
|
2011-04-04 19:47:21 +08:00
|
|
|
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
|
|
|
|
bIsLeftNodeLeaf = true;
|
2011-01-13 21:04:00 +08:00
|
|
|
}
|
|
|
|
else
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
2011-04-04 19:47:21 +08:00
|
|
|
Ncv32u leftNodeOffset = tree->left[n];
|
2011-01-13 21:04:00 +08:00
|
|
|
nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
|
|
|
|
haar.bHasStumpsOnly = false;
|
|
|
|
}
|
|
|
|
curNode.setLeftNodeDesc(nodeLeft);
|
2011-04-04 19:47:21 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
HaarClassifierNodeDescriptor32 nodeRight;
|
|
|
|
if ( tree->right[n] <= 0 )
|
2011-04-04 19:47:21 +08:00
|
|
|
{
|
|
|
|
Ncv32f rightVal = tree->alpha[-tree->right[n]];
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvStat = nodeRight.create(rightVal);
|
|
|
|
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
|
2011-04-04 19:47:21 +08:00
|
|
|
bIsRightNodeLeaf = true;
|
2011-01-13 21:04:00 +08:00
|
|
|
}
|
|
|
|
else
|
2011-04-04 19:47:21 +08:00
|
|
|
{
|
|
|
|
Ncv32u rightNodeOffset = tree->right[n];
|
2011-01-13 21:04:00 +08:00
|
|
|
nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
|
|
|
|
haar.bHasStumpsOnly = false;
|
|
|
|
}
|
2011-04-04 19:47:21 +08:00
|
|
|
curNode.setRightNodeDesc(nodeRight);
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
Ncv32u tiltedVal = feature->tilted;
|
2011-04-04 19:47:21 +08:00
|
|
|
haar.bNeedsTiltedII = (tiltedVal != 0);
|
2011-01-13 21:04:00 +08:00
|
|
|
|
2011-04-04 19:47:21 +08:00
|
|
|
Ncv32u featureId = 0;
|
2011-01-13 21:04:00 +08:00
|
|
|
for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects
|
2011-04-07 20:59:01 +08:00
|
|
|
{
|
|
|
|
Ncv32u rectX = feature->rect[l].r.x;
|
2011-01-13 21:04:00 +08:00
|
|
|
Ncv32u rectY = feature->rect[l].r.y;
|
|
|
|
Ncv32u rectWidth = feature->rect[l].r.width;
|
|
|
|
Ncv32u rectHeight = feature->rect[l].r.height;
|
|
|
|
|
|
|
|
Ncv32f rectWeight = feature->rect[l].weight;
|
|
|
|
|
|
|
|
if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
|
|
|
|
break;
|
|
|
|
|
|
|
|
HaarFeature64 curFeature;
|
|
|
|
ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
|
|
|
|
curFeature.setWeight(rectWeight);
|
|
|
|
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
|
|
|
|
haarFeatures.push_back(curFeature);
|
|
|
|
|
|
|
|
featureId++;
|
|
|
|
}
|
|
|
|
|
|
|
|
HaarFeatureDescriptor32 tmpFeatureDesc;
|
2011-04-04 19:47:21 +08:00
|
|
|
ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
|
2011-08-08 19:28:14 +08:00
|
|
|
featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
|
2011-01-13 21:04:00 +08:00
|
|
|
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
|
|
|
|
curNode.setFeatureDesc(tmpFeatureDesc);
|
|
|
|
|
|
|
|
if (!nodeId)
|
|
|
|
{
|
|
|
|
//root node
|
|
|
|
haarClassifierNodes.push_back(curNode);
|
|
|
|
curMaxTreeDepth = 1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//other node
|
|
|
|
h_TmpClassifierNotRootNodes.push_back(curNode);
|
|
|
|
curMaxTreeDepth++;
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeId++;
|
2011-04-04 19:47:21 +08:00
|
|
|
}
|
2011-01-13 21:04:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
curStage.setNumClassifierRootNodes(treesCount);
|
2011-04-04 19:47:21 +08:00
|
|
|
haarStages.push_back(curStage);
|
2011-01-13 21:04:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
//fill in cascade stats
|
2011-08-08 19:28:14 +08:00
|
|
|
haar.NumStages = static_cast<Ncv32u>(haarStages.size());
|
|
|
|
haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
|
|
|
|
haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
|
|
|
|
haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());
|
2011-01-13 21:04:00 +08:00
|
|
|
|
|
|
|
//merge root and leaf nodes in one classifiers array
|
2011-08-08 19:28:14 +08:00
|
|
|
Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
|
2011-01-13 21:04:00 +08:00
|
|
|
for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
|
|
|
|
{
|
2011-04-04 19:47:21 +08:00
|
|
|
HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc();
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc();
|
2011-04-04 19:47:21 +08:00
|
|
|
if (!featureDesc.isLeftNodeLeaf())
|
2011-01-13 21:04:00 +08:00
|
|
|
{
|
|
|
|
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
|
|
|
|
nodeLeft.create(newOffset);
|
|
|
|
}
|
|
|
|
haarClassifierNodes[i].setLeftNodeDesc(nodeLeft);
|
|
|
|
|
|
|
|
HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc();
|
2011-04-04 19:47:21 +08:00
|
|
|
if (!featureDesc.isRightNodeLeaf())
|
2011-01-13 21:04:00 +08:00
|
|
|
{
|
|
|
|
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
|
|
|
|
nodeRight.create(newOffset);
|
|
|
|
}
|
|
|
|
haarClassifierNodes[i].setRightNodeDesc(nodeRight);
|
|
|
|
}
|
2011-04-07 20:59:01 +08:00
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++)
|
|
|
|
{
|
2011-04-04 19:47:21 +08:00
|
|
|
HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc();
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc();
|
2011-04-04 19:47:21 +08:00
|
|
|
if (!featureDesc.isLeftNodeLeaf())
|
2011-01-13 21:04:00 +08:00
|
|
|
{
|
|
|
|
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
|
|
|
|
nodeLeft.create(newOffset);
|
|
|
|
}
|
|
|
|
h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft);
|
|
|
|
|
|
|
|
HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc();
|
2011-04-04 19:47:21 +08:00
|
|
|
if (!featureDesc.isRightNodeLeaf())
|
2011-01-13 21:04:00 +08:00
|
|
|
{
|
|
|
|
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
|
|
|
|
nodeRight.create(newOffset);
|
|
|
|
}
|
|
|
|
h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight);
|
|
|
|
|
|
|
|
haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NCV_SUCCESS;
|
2010-12-17 23:41:26 +08:00
|
|
|
}
|
|
|
|
|
2011-01-13 21:04:00 +08:00
|
|
|
#endif /* HAVE_CUDA */
|