Removed the contrib, legacy and softcascade modules; removed the latentsvm and datamatrix detectors from objdetect; removed the haartraining and sft apps.
Some of this code will be moved to the opencv_contrib module. In order to make this PR pass buildbot, please comment out the opencv_legacy, opencv_contrib and opencv_softcascade test runs.
apps/CMakeLists.txt
@@ -1,6 +1,4 @@
 add_definitions(-D__OPENCV_BUILD=1)
 link_libraries(${OPENCV_LINKER_LIBS})

-add_subdirectory(haartraining)
 add_subdirectory(traincascade)
-add_subdirectory(sft)
apps/haartraining/CMakeLists.txt
@@ -1,89 +0,0 @@
SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_photo opencv_ml opencv_highgui opencv_objdetect opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy)
ocv_check_dependencies(${OPENCV_HAARTRAINING_DEPS})

if(NOT OCV_DEPENDENCIES_FOUND)
  return()
endif()

project(haartraining)

ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" "${OpenCV_SOURCE_DIR}/include/opencv")
ocv_include_modules(${OPENCV_HAARTRAINING_DEPS})

if(WIN32)
  link_directories(${CMAKE_CURRENT_BINARY_DIR})
endif()

link_libraries(${OPENCV_HAARTRAINING_DEPS} opencv_haartraining_engine)

# -----------------------------------------------------------
#  Library
# -----------------------------------------------------------
set(cvhaartraining_lib_src
  _cvcommon.h
  cvclassifier.h
  _cvhaartraining.h
  cvhaartraining.h
  cvboost.cpp
  cvcommon.cpp
  cvhaarclassifier.cpp
  cvhaartraining.cpp
  cvsamples.cpp
  )

add_library(opencv_haartraining_engine STATIC ${cvhaartraining_lib_src})
set_target_properties(opencv_haartraining_engine PROPERTIES
                      DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
                      ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
                      RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
                      INSTALL_NAME_DIR lib
                      )

# -----------------------------------------------------------
#  haartraining
# -----------------------------------------------------------

add_executable(opencv_haartraining cvhaartraining.h haartraining.cpp)
set_target_properties(opencv_haartraining PROPERTIES
                      DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
                      OUTPUT_NAME "opencv_haartraining")

# -----------------------------------------------------------
#  createsamples
# -----------------------------------------------------------

add_executable(opencv_createsamples cvhaartraining.h createsamples.cpp)
set_target_properties(opencv_createsamples PROPERTIES
                      DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
                      OUTPUT_NAME "opencv_createsamples")

# -----------------------------------------------------------
#  performance
# -----------------------------------------------------------
add_executable(opencv_performance performance.cpp)
set_target_properties(opencv_performance PROPERTIES
                      DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
                      OUTPUT_NAME "opencv_performance")

# -----------------------------------------------------------
#  Install part
# -----------------------------------------------------------

if(INSTALL_CREATE_DISTRIB)
  if(BUILD_SHARED_LIBS)
    install(TARGETS opencv_haartraining RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
    install(TARGETS opencv_createsamples RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
    install(TARGETS opencv_performance RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
  endif()
else()
  install(TARGETS opencv_haartraining RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
  install(TARGETS opencv_createsamples RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
  install(TARGETS opencv_performance RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
endif()

if(ENABLE_SOLUTION_FOLDERS)
  set_target_properties(opencv_performance PROPERTIES FOLDER "applications")
  set_target_properties(opencv_createsamples PROPERTIES FOLDER "applications")
  set_target_properties(opencv_haartraining PROPERTIES FOLDER "applications")
  set_target_properties(opencv_haartraining_engine PROPERTIES FOLDER "applications")
endif()
apps/haartraining/_cvcommon.h
@@ -1,92 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __CVCOMMON_H_
#define __CVCOMMON_H_

#include "opencv2/core.hpp"

#include "cxcore.h"
#include "cv.h"
#include "cxmisc.h"

#define __BEGIN__ __CV_BEGIN__
#define __END__   __CV_END__
#define EXIT      __CV_EXIT__

#ifndef PATH_MAX
#define PATH_MAX 512
#endif /* PATH_MAX */

int icvMkDir( const char* filename );

/* returns index at specified position from index matrix of any type.
   if matrix is NULL, then specified position is returned */
CV_INLINE
int icvGetIdxAt( CvMat* idx, int pos );

CV_INLINE
int icvGetIdxAt( CvMat* idx, int pos )
{
    if( idx == NULL )
    {
        return pos;
    }
    else
    {
        CvScalar sc;
        int type;

        type = CV_MAT_TYPE( idx->type );
        cvRawDataToScalar( idx->data.ptr + pos *
            ( (idx->rows == 1) ? CV_ELEM_SIZE( type ) : idx->step ), type, &sc );

        return (int) sc.val[0];
    }
}

/* debug functions */

#define CV_DEBUG_SAVE( ptr ) icvSave( ptr, __FILE__, __LINE__ );

void icvSave( const CvArr* ptr, const char* filename, int line );

#endif /* __CVCOMMON_H_ */
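For context on how the index-matrix indirection above was used by the training code: a minimal sketch (it compiles only against this now-deleted header and the legacy C API; the sample values are made up):

```cpp
#include "_cvcommon.h"

int main()
{
    // With a NULL index matrix, icvGetIdxAt is an identity mapping.
    int direct = icvGetIdxAt( NULL, 2 );            // == 2

    // With a 1xN index matrix, it dereferences the stored sample index,
    // so loops can run over a subset of samples transparently.
    int data[3] = { 7, 1, 4 };
    CvMat idx = cvMat( 1, 3, CV_32SC1, data );
    int mapped = icvGetIdxAt( &idx, 2 );            // == 4

    return (direct == 2 && mapped == 4) ? 0 : 1;
}
```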
apps/haartraining/_cvhaartraining.h
@@ -1,414 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/*
 * _cvhaartraining.h
 *
 * training of cascade of boosted classifiers based on haar features
 */

#ifndef __CVHAARTRAINING_H_
#define __CVHAARTRAINING_H_

#include "_cvcommon.h"
#include "cvclassifier.h"
#include <cstring>
#include <cstdio>

/* parameters for tree cascade classifier training */

/* max number of clusters */
#define CV_MAX_CLUSTERS 3

/* term criteria for K-Means */
#define CV_TERM_CRITERIA() cvTermCriteria( CV_TERMCRIT_EPS, 1000, 1E-5 )

/* print statistic info */
#define CV_VERBOSE 1

#define CV_STAGE_CART_FILE_NAME "AdaBoostCARTHaarClassifier.txt"

#define CV_HAAR_FEATURE_MAX      3
#define CV_HAAR_FEATURE_DESC_MAX 20

typedef int    sum_type;
typedef double sqsum_type;
typedef short  idx_type;

#define CV_SUM_MAT_TYPE   CV_32SC1
#define CV_SQSUM_MAT_TYPE CV_64FC1
#define CV_IDX_MAT_TYPE   CV_16SC1

#define CV_STUMP_TRAIN_PORTION 100

#define CV_THRESHOLD_EPS (0.00001F)

typedef struct CvTHaarFeature
{
    char desc[CV_HAAR_FEATURE_DESC_MAX];
    int  tilted;
    struct
    {
        CvRect r;
        float weight;
    } rect[CV_HAAR_FEATURE_MAX];
} CvTHaarFeature;

typedef struct CvFastHaarFeature
{
    int tilted;
    struct
    {
        int p0, p1, p2, p3;
        float weight;
    } rect[CV_HAAR_FEATURE_MAX];
} CvFastHaarFeature;

typedef struct CvIntHaarFeatures
{
    CvSize winsize;
    int count;
    CvTHaarFeature* feature;
    CvFastHaarFeature* fastfeature;
} CvIntHaarFeatures;

CV_INLINE CvTHaarFeature cvHaarFeature( const char* desc,
                            int x0, int y0, int w0, int h0, float wt0,
                            int x1, int y1, int w1, int h1, float wt1,
                            int x2 CV_DEFAULT( 0 ), int y2 CV_DEFAULT( 0 ),
                            int w2 CV_DEFAULT( 0 ), int h2 CV_DEFAULT( 0 ),
                            float wt2 CV_DEFAULT( 0.0F ) );

CV_INLINE CvTHaarFeature cvHaarFeature( const char* desc,
                            int x0, int y0, int w0, int h0, float wt0,
                            int x1, int y1, int w1, int h1, float wt1,
                            int x2, int y2, int w2, int h2, float wt2 )
{
    CvTHaarFeature hf;

    assert( CV_HAAR_FEATURE_MAX >= 3 );
    assert( strlen( desc ) < CV_HAAR_FEATURE_DESC_MAX );

    strcpy( &(hf.desc[0]), desc );
    hf.tilted = ( hf.desc[0] == 't' );

    hf.rect[0].r.x = x0;
    hf.rect[0].r.y = y0;
    hf.rect[0].r.width  = w0;
    hf.rect[0].r.height = h0;
    hf.rect[0].weight   = wt0;

    hf.rect[1].r.x = x1;
    hf.rect[1].r.y = y1;
    hf.rect[1].r.width  = w1;
    hf.rect[1].r.height = h1;
    hf.rect[1].weight   = wt1;

    hf.rect[2].r.x = x2;
    hf.rect[2].r.y = y2;
    hf.rect[2].r.width  = w2;
    hf.rect[2].r.height = h2;
    hf.rect[2].weight   = wt2;

    return hf;
}

/* Prepared for training samples */
typedef struct CvHaarTrainingData
{
    CvSize winsize;    /* training image size */
    int    maxnum;     /* maximum number of samples */
    CvMat  sum;        /* sum images (each row represents image) */
    CvMat  tilted;     /* tilted sum images (each row represents image) */
    CvMat  normfactor; /* normalization factor */
    CvMat  cls;        /* classes. 1.0 - object, 0.0 - background */
    CvMat  weights;    /* weights */

    CvMat* valcache;   /* precalculated feature values (CV_32FC1) */
    CvMat* idxcache;   /* presorted indices (CV_IDX_MAT_TYPE) */
} CvHaarTrainigData;


/* Passed to callback functions */
typedef struct CvUserdata
{
    CvHaarTrainingData* trainingData;
    CvIntHaarFeatures* haarFeatures;
} CvUserdata;

CV_INLINE
CvUserdata cvUserdata( CvHaarTrainingData* trainingData,
                       CvIntHaarFeatures* haarFeatures );

CV_INLINE
CvUserdata cvUserdata( CvHaarTrainingData* trainingData,
                       CvIntHaarFeatures* haarFeatures )
{
    CvUserdata userdata;

    userdata.trainingData = trainingData;
    userdata.haarFeatures = haarFeatures;

    return userdata;
}


#define CV_INT_HAAR_CLASSIFIER_FIELDS()                                 \
    float (*eval)( CvIntHaarClassifier*, sum_type*, sum_type*, float ); \
    void  (*save)( CvIntHaarClassifier*, FILE* file );                  \
    void  (*release)( CvIntHaarClassifier** );

/* internal weak classifier*/
typedef struct CvIntHaarClassifier
{
    CV_INT_HAAR_CLASSIFIER_FIELDS()
} CvIntHaarClassifier;

/*
 * CART classifier
 */
typedef struct CvCARTHaarClassifier
{
    CV_INT_HAAR_CLASSIFIER_FIELDS()

    int count;
    int* compidx;
    CvTHaarFeature* feature;
    CvFastHaarFeature* fastfeature;
    float* threshold;
    int* left;
    int* right;
    float* val;
} CvCARTHaarClassifier;

/* internal stage classifier */
typedef struct CvStageHaarClassifier
{
    CV_INT_HAAR_CLASSIFIER_FIELDS()

    int count;
    float threshold;
    CvIntHaarClassifier** classifier;
} CvStageHaarClassifier;

/* internal cascade classifier */
typedef struct CvCascadeHaarClassifier
{
    CV_INT_HAAR_CLASSIFIER_FIELDS()

    int count;
    CvIntHaarClassifier** classifier;
} CvCascadeHaarClassifier;


/* internal tree cascade classifier node */
typedef struct CvTreeCascadeNode
{
    CvStageHaarClassifier* stage;

    struct CvTreeCascadeNode* next;
    struct CvTreeCascadeNode* child;
    struct CvTreeCascadeNode* parent;

    struct CvTreeCascadeNode* next_same_level;
    struct CvTreeCascadeNode* child_eval;
    int idx;
    int leaf;
} CvTreeCascadeNode;

/* internal tree cascade classifier */
typedef struct CvTreeCascadeClassifier
{
    CV_INT_HAAR_CLASSIFIER_FIELDS()

    CvTreeCascadeNode* root;      /* root of the tree */
    CvTreeCascadeNode* root_eval; /* root node for the filtering */

    int next_idx;
} CvTreeCascadeClassifier;


CV_INLINE float cvEvalFastHaarFeature( const CvFastHaarFeature* feature,
                                       const sum_type* sum, const sum_type* tilted )
{
    const sum_type* img = feature->tilted ? tilted : sum;
    float ret = feature->rect[0].weight*
        (img[feature->rect[0].p0] - img[feature->rect[0].p1] -
         img[feature->rect[0].p2] + img[feature->rect[0].p3]) +
        feature->rect[1].weight*
        (img[feature->rect[1].p0] - img[feature->rect[1].p1] -
         img[feature->rect[1].p2] + img[feature->rect[1].p3]);

    if( feature->rect[2].weight != 0.0f )
        ret += feature->rect[2].weight *
            ( img[feature->rect[2].p0] - img[feature->rect[2].p1] -
              img[feature->rect[2].p2] + img[feature->rect[2].p3] );
    return ret;
}


typedef struct CvSampleDistortionData
{
    IplImage* src;
    IplImage* erode;
    IplImage* dilate;
    IplImage* mask;
    IplImage* img;
    IplImage* maskimg;
    int dx;
    int dy;
    int bgcolor;
} CvSampleDistortionData;

/*
 * icvConvertToFastHaarFeature
 *
 * Convert to fast representation of haar features
 *
 * haarFeature     - input array
 * fastHaarFeature - output array
 * size            - size of arrays
 * step            - row step for the integral image
 */
void icvConvertToFastHaarFeature( CvTHaarFeature* haarFeature,
                                  CvFastHaarFeature* fastHaarFeature,
                                  int size, int step );


void icvWriteVecHeader( FILE* file, int count, int width, int height );
void icvWriteVecSample( FILE* file, CvArr* sample );
void icvPlaceDistortedSample( CvArr* background,
                              int inverse, int maxintensitydev,
                              double maxxangle, double maxyangle, double maxzangle,
                              int inscribe, double maxshiftf, double maxscalef,
                              CvSampleDistortionData* data );
void icvEndSampleDistortion( CvSampleDistortionData* data );

int icvStartSampleDistortion( const char* imgfilename, int bgcolor, int bgthreshold,
                              CvSampleDistortionData* data );

typedef int (*CvGetHaarTrainingDataCallback)( CvMat* img, void* userdata );

typedef struct CvVecFile
{
    FILE*  input;
    int    count;
    int    vecsize;
    int    last;
    short* vector;
} CvVecFile;

int icvGetHaarTraininDataFromVecCallback( CvMat* img, void* userdata );

/*
 * icvGetHaarTrainingDataFromVec
 *
 * Fill <data> with samples from .vec file, passed <cascade>
int icvGetHaarTrainingDataFromVec( CvHaarTrainingData* data, int first, int count,
                                   CvIntHaarClassifier* cascade,
                                   const char* filename,
                                   int* consumed );
 */

CvIntHaarClassifier* icvCreateCARTHaarClassifier( int count );

void icvReleaseHaarClassifier( CvIntHaarClassifier** classifier );

void icvInitCARTHaarClassifier( CvCARTHaarClassifier* carthaar, CvCARTClassifier* cart,
                                CvIntHaarFeatures* intHaarFeatures );

float icvEvalCARTHaarClassifier( CvIntHaarClassifier* classifier,
                                 sum_type* sum, sum_type* tilted, float normfactor );

CvIntHaarClassifier* icvCreateStageHaarClassifier( int count, float threshold );

void icvReleaseStageHaarClassifier( CvIntHaarClassifier** classifier );

float icvEvalStageHaarClassifier( CvIntHaarClassifier* classifier,
                                  sum_type* sum, sum_type* tilted, float normfactor );

CvIntHaarClassifier* icvCreateCascadeHaarClassifier( int count );

void icvReleaseCascadeHaarClassifier( CvIntHaarClassifier** classifier );

float icvEvalCascadeHaarClassifier( CvIntHaarClassifier* classifier,
                                    sum_type* sum, sum_type* tilted, float normfactor );

void icvSaveHaarFeature( CvTHaarFeature* feature, FILE* file );

void icvLoadHaarFeature( CvTHaarFeature* feature, FILE* file );

void icvSaveCARTHaarClassifier( CvIntHaarClassifier* classifier, FILE* file );

CvIntHaarClassifier* icvLoadCARTHaarClassifier( FILE* file, int step );

void icvSaveStageHaarClassifier( CvIntHaarClassifier* classifier, FILE* file );

CvIntHaarClassifier* icvLoadCARTStageHaarClassifier( const char* filename, int step );


/* tree cascade classifier */

float icvEvalTreeCascadeClassifier( CvIntHaarClassifier* classifier,
                                    sum_type* sum, sum_type* tilted, float normfactor );

void icvSetLeafNode( CvTreeCascadeClassifier* tree, CvTreeCascadeNode* leaf );

float icvEvalTreeCascadeClassifierFilter( CvIntHaarClassifier* classifier, sum_type* sum,
                                          sum_type* tilted, float normfactor );

CvTreeCascadeNode* icvCreateTreeCascadeNode();

void icvReleaseTreeCascadeNodes( CvTreeCascadeNode** node );

void icvReleaseTreeCascadeClassifier( CvIntHaarClassifier** classifier );

/* Prints out current tree structure to <stdout> */
void icvPrintTreeCascade( CvTreeCascadeNode* root );

/* Loads tree cascade classifier */
CvIntHaarClassifier* icvLoadTreeCascadeClassifier( const char* filename, int step,
                                                   int* splits );

/* Finds leaves belonging to maximal level and connects them via leaf->next_same_level */
CvTreeCascadeNode* icvFindDeepestLeaves( CvTreeCascadeClassifier* tree );

#endif /* __CVHAARTRAINING_H_ */
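For readers unfamiliar with this header: CvTHaarFeature stores rectangles, while CvFastHaarFeature stores four precomputed offsets (p0..p3) into a fixed-stride integral image, so cvEvalFastHaarFeature needs only lookups and additions. A sketch of how the two representations relate, compiling only against the deleted haartraining sources; the descriptor string, feature geometry, window size and zero-filled integral images here are illustrative, not taken from the PR:

```cpp
#include "_cvhaartraining.h"

int main()
{
    const int step = 25;  // row step of a 25x25 integral image, in elements

    // Two-rectangle edge feature: the full 4x4 window weighted -1,
    // its upper 4x2 half weighted +2 (net +1 up, -1 down).
    CvTHaarFeature f = cvHaarFeature( "haar_y2",
                                      0, 0, 4, 4, -1.0f,
                                      0, 0, 4, 2,  2.0f );

    // Replace each CvRect by four offsets into the integral image
    // (see the icvConvertToFastHaarFeature doc comment above).
    CvFastHaarFeature ff;
    icvConvertToFastHaarFeature( &f, &ff, 1, step );

    sum_type sum[25 * 25]    = { 0 }; // integral image (all zeros here)
    sum_type tilted[25 * 25] = { 0 }; // tilted integral image

    float val = cvEvalFastHaarFeature( &ff, sum, tilted );
    return val == 0.0f ? 0 : 1;      // zero input gives a zero response
}
```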
apps/haartraining/createsamples.cpp
@@ -1,245 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/*
 * createsamples.cpp
 *
 * Create test/training samples
 */

#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <cmath>
#include <ctime>

using namespace std;

#include "cvhaartraining.h"

int main( int argc, char* argv[] )
{
    int i = 0;
    char* nullname   = (char*)"(NULL)";
    char* vecname    = NULL; /* .vec file name */
    char* infoname   = NULL; /* file name with marked up image descriptions */
    char* imagename  = NULL; /* single sample image */
    char* bgfilename = NULL; /* background */
    int num = 1000;
    int bgcolor = 0;
    int bgthreshold = 80;
    int invert = 0;
    int maxintensitydev = 40;
    double maxxangle = 1.1;
    double maxyangle = 1.1;
    double maxzangle = 0.5;
    int showsamples = 0;
    /* the samples are adjusted to this scale in the sample preview window */
    double scale = 4.0;
    int width = 24;
    int height = 24;

    srand((unsigned int)time(0));

    if( argc == 1 )
    {
        printf( "Usage: %s\n [-info <collection_file_name>]\n"
                " [-img <image_file_name>]\n"
                " [-vec <vec_file_name>]\n"
                " [-bg <background_file_name>]\n [-num <number_of_samples = %d>]\n"
                " [-bgcolor <background_color = %d>]\n"
                " [-inv] [-randinv] [-bgthresh <background_color_threshold = %d>]\n"
                " [-maxidev <max_intensity_deviation = %d>]\n"
                " [-maxxangle <max_x_rotation_angle = %f>]\n"
                " [-maxyangle <max_y_rotation_angle = %f>]\n"
                " [-maxzangle <max_z_rotation_angle = %f>]\n"
                " [-show [<scale = %f>]]\n"
                " [-w <sample_width = %d>]\n [-h <sample_height = %d>]\n",
                argv[0], num, bgcolor, bgthreshold, maxintensitydev,
                maxxangle, maxyangle, maxzangle, scale, width, height );

        return 0;
    }

    for( i = 1; i < argc; ++i )
    {
        if( !strcmp( argv[i], "-info" ) )
        {
            infoname = argv[++i];
        }
        else if( !strcmp( argv[i], "-img" ) )
        {
            imagename = argv[++i];
        }
        else if( !strcmp( argv[i], "-vec" ) )
        {
            vecname = argv[++i];
        }
        else if( !strcmp( argv[i], "-bg" ) )
        {
            bgfilename = argv[++i];
        }
        else if( !strcmp( argv[i], "-num" ) )
        {
            num = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-bgcolor" ) )
        {
            bgcolor = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-bgthresh" ) )
        {
            bgthreshold = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-inv" ) )
        {
            invert = 1;
        }
        else if( !strcmp( argv[i], "-randinv" ) )
        {
            invert = CV_RANDOM_INVERT;
        }
        else if( !strcmp( argv[i], "-maxidev" ) )
        {
            maxintensitydev = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-maxxangle" ) )
        {
            maxxangle = atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-maxyangle" ) )
        {
            maxyangle = atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-maxzangle" ) )
        {
            maxzangle = atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-show" ) )
        {
            showsamples = 1;
            if( i+1 < argc && strlen( argv[i+1] ) > 0 && argv[i+1][0] != '-' )
            {
                double d;
                d = strtod( argv[i+1], 0 );
                if( d != -HUGE_VAL && d != HUGE_VAL && d > 0 ) scale = d;
                ++i;
            }
        }
        else if( !strcmp( argv[i], "-w" ) )
        {
            width = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-h" ) )
        {
            height = atoi( argv[++i] );
        }
    }

    printf( "Info file name: %s\n", ((infoname == NULL) ? nullname : infoname ) );
    printf( "Img file name: %s\n",  ((imagename == NULL) ? nullname : imagename ) );
    printf( "Vec file name: %s\n",  ((vecname == NULL) ? nullname : vecname ) );
    printf( "BG  file name: %s\n",  ((bgfilename == NULL) ? nullname : bgfilename ) );
    printf( "Num: %d\n", num );
    printf( "BG color: %d\n", bgcolor );
    printf( "BG threshold: %d\n", bgthreshold );
    printf( "Invert: %s\n", (invert == CV_RANDOM_INVERT) ? "RANDOM"
                            : ( (invert) ? "TRUE" : "FALSE" ) );
    printf( "Max intensity deviation: %d\n", maxintensitydev );
    printf( "Max x angle: %g\n", maxxangle );
    printf( "Max y angle: %g\n", maxyangle );
    printf( "Max z angle: %g\n", maxzangle );
    printf( "Show samples: %s\n", (showsamples) ? "TRUE" : "FALSE" );
    if( showsamples )
    {
        printf( "Scale: %g\n", scale );
    }
    printf( "Width: %d\n", width );
    printf( "Height: %d\n", height );

    /* determine action */
    if( imagename && vecname )
    {
        printf( "Create training samples from single image applying distortions...\n" );

        cvCreateTrainingSamples( vecname, imagename, bgcolor, bgthreshold, bgfilename,
                                 num, invert, maxintensitydev,
                                 maxxangle, maxyangle, maxzangle,
                                 showsamples, width, height );

        printf( "Done\n" );
    }
    else if( imagename && bgfilename && infoname )
    {
        printf( "Create test samples from single image applying distortions...\n" );

        cvCreateTestSamples( infoname, imagename, bgcolor, bgthreshold, bgfilename, num,
                             invert, maxintensitydev,
                             maxxangle, maxyangle, maxzangle, showsamples, width, height );

        printf( "Done\n" );
    }
    else if( infoname && vecname )
    {
        int total;

        printf( "Create training samples from images collection...\n" );

        total = cvCreateTrainingSamplesFromInfo( infoname, vecname, num, showsamples,
                                                 width, height );

        printf( "Done. Created %d samples\n", total );
    }
    else if( vecname )
    {
        printf( "View samples from vec file (press ESC to exit)...\n" );

        cvShowVecSamples( vecname, width, height, scale );

        printf( "Done\n" );
    }
    else
    {
        printf( "Nothing to do\n" );
    }

    return 0;
}
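As the main() above shows, the tool is a thin wrapper over the haartraining library. A minimal programmatic equivalent of `opencv_createsamples -img object.png -vec object.vec` with the printed defaults would look like this (a sketch against the deleted cvhaartraining.h; the file names are placeholders):

```cpp
#include "cvhaartraining.h"

int main()
{
    // Same call main() issues for the "-img + -vec" case, spelled out with
    // the defaults printed above (num=1000, bgcolor=0, bgthreshold=80, ...).
    cvCreateTrainingSamples( "object.vec",  /* -vec */
                             "object.png",  /* -img */
                             0, 80, NULL,   /* bgcolor, bgthresh, no -bg  */
                             1000, 0, 40,   /* -num, -inv off, -maxidev   */
                             1.1, 1.1, 0.5, /* -maxxangle/-maxyangle/-maxzangle */
                             0, 24, 24 );   /* -show off, -w, -h          */
    return 0;
}
```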
apps/haartraining/cvclassifier.h
@@ -1,729 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/*
 * File cvclassifier.h
 *
 * Classifier types
 */

#ifndef _CVCLASSIFIER_H_
#define _CVCLASSIFIER_H_

#include <cmath>
#include "cxcore.h"

#define CV_BOOST_API

/* Convert matrix to vector */
#define CV_MAT2VEC( mat, vdata, vstep, num )      \
    assert( (mat).rows == 1 || (mat).cols == 1 ); \
    (vdata) = ((mat).data.ptr);                   \
    if( (mat).rows == 1 )                         \
    {                                             \
        (vstep) = CV_ELEM_SIZE( (mat).type );     \
        (num) = (mat).cols;                       \
    }                                             \
    else                                          \
    {                                             \
        (vstep) = (mat).step;                     \
        (num) = (mat).rows;                       \
    }

/* Set up <sample> matrix header to be sample <num> of the <trainData> samples matrix */
#define CV_GET_SAMPLE( trainData, tdflags, num, sample )                                    \
    if( CV_IS_ROW_SAMPLE( tdflags ) )                                                       \
    {                                                                                       \
        cvInitMatHeader( &(sample), 1, (trainData).cols,                                    \
                         CV_MAT_TYPE( (trainData).type ),                                   \
                         ((trainData).data.ptr + (num) * (trainData).step),                 \
                         (trainData).step );                                                \
    }                                                                                       \
    else                                                                                    \
    {                                                                                       \
        cvInitMatHeader( &(sample), (trainData).rows, 1,                                    \
                         CV_MAT_TYPE( (trainData).type ),                                   \
                         ((trainData).data.ptr + (num) * CV_ELEM_SIZE( (trainData).type )), \
                         (trainData).step );                                                \
    }

#define CV_GET_SAMPLE_STEP( trainData, tdflags, sstep ) \
    (sstep) = ( ( CV_IS_ROW_SAMPLE( tdflags ) )         \
                ? (trainData).step : CV_ELEM_SIZE( (trainData).type ) );


#define CV_LOGRATIO_THRESHOLD 0.00001F

/* log( val / (1 - val ) ) */
CV_INLINE float cvLogRatio( float val );

CV_INLINE float cvLogRatio( float val )
{
    float tval;

    tval = MAX(CV_LOGRATIO_THRESHOLD, MIN( 1.0F - CV_LOGRATIO_THRESHOLD, (val) ));
    return logf( tval / (1.0F - tval) );
}


/* flags values for the classifier constructor flags parameter */

/* each trainData matrix column is a sample */
#define CV_COL_SAMPLE 0

/* each trainData matrix row is a sample */
#define CV_ROW_SAMPLE 1

#ifndef CV_IS_ROW_SAMPLE
#  define CV_IS_ROW_SAMPLE( flags ) ( ( flags ) & CV_ROW_SAMPLE )
#endif

/* Classifier supports tune function */
#define CV_TUNABLE (1 << 1)

#define CV_IS_TUNABLE( flags ) ( (flags) & CV_TUNABLE )


/* classifier fields common to all classifiers */
#define CV_CLASSIFIER_FIELDS()                                                     \
    int flags;                                                                     \
    float(*eval)( struct CvClassifier*, CvMat* );                                  \
    void (*tune)( struct CvClassifier*, CvMat*, int flags, CvMat*, CvMat*, CvMat*, \
                  CvMat*, CvMat* );                                                \
    int  (*save)( struct CvClassifier*, const char* file_name );                   \
    void (*release)( struct CvClassifier** );

typedef struct CvClassifier
{
    CV_CLASSIFIER_FIELDS()
} CvClassifier;

#define CV_CLASSIFIER_TRAIN_PARAM_FIELDS()
typedef struct CvClassifierTrainParams
{
    CV_CLASSIFIER_TRAIN_PARAM_FIELDS()
} CvClassifierTrainParams;


/*
 Common classifier constructor:
 CvClassifier* cvCreateMyClassifier( CvMat* trainData,
                                     int flags,
                                     CvMat* trainClasses,
                                     CvMat* typeMask,
                                     CvMat* missedMeasurementsMask CV_DEFAULT(0),
                                     CvCompIdx* compIdx CV_DEFAULT(0),
                                     CvMat* sampleIdx CV_DEFAULT(0),
                                     CvMat* weights CV_DEFAULT(0),
                                     CvClassifierTrainParams* trainParams CV_DEFAULT(0) )
 */

typedef CvClassifier* (*CvClassifierConstructor)( CvMat*, int, CvMat*, CvMat*, CvMat*,
                                                  CvMat*, CvMat*, CvMat*,
                                                  CvClassifierTrainParams* );

typedef enum CvStumpType
{
    CV_CLASSIFICATION       = 0,
    CV_CLASSIFICATION_CLASS = 1,
    CV_REGRESSION           = 2
} CvStumpType;

typedef enum CvStumpError
{
    CV_MISCLASSIFICATION = 0,
    CV_GINI              = 1,
    CV_ENTROPY           = 2,
    CV_SQUARE            = 3
} CvStumpError;


typedef struct CvStumpTrainParams
{
    CV_CLASSIFIER_TRAIN_PARAM_FIELDS()
    CvStumpType  type;
    CvStumpError error;
} CvStumpTrainParams;

typedef struct CvMTStumpTrainParams
{
    CV_CLASSIFIER_TRAIN_PARAM_FIELDS()
    CvStumpType  type;
    CvStumpError error;
    int portion; /* number of components calculated in each thread */
    int numcomp; /* total number of components */

    /* callback which fills <mat> with components [first, first+num) */
    void (*getTrainData)( CvMat* mat, CvMat* sampleIdx, CvMat* compIdx,
                          int first, int num, void* userdata );
    CvMat* sortedIdx; /* presorted samples indices */
    void* userdata;   /* passed to callback */
} CvMTStumpTrainParams;

typedef struct CvStumpClassifier
{
    CV_CLASSIFIER_FIELDS()
    int compidx;

    float lerror; /* impurity of the left node */
    float rerror; /* impurity of the right node */

    float threshold;
    float left;
    float right;
} CvStumpClassifier;

typedef struct CvCARTTrainParams
{
    CV_CLASSIFIER_TRAIN_PARAM_FIELDS()
    /* desired number of internal nodes */
    int count;
    CvClassifierTrainParams* stumpTrainParams;
    CvClassifierConstructor  stumpConstructor;

    /*
     * Split sample indices <idx>
     * on the "left" indices <left> and "right" indices <right>
     * according to samples components <compidx> values and <threshold>.
     *
     * NOTE: Matrices <left> and <right> must be allocated using cvCreateMat function
     *   since they are freed using cvReleaseMat function
     *
     * If it is NULL then the default implementation which evaluates training
     * samples from <trainData> passed to classifier constructor is used
     */
    void (*splitIdx)( int compidx, float threshold,
                      CvMat* idx, CvMat** left, CvMat** right,
                      void* userdata );
    void* userdata;
} CvCARTTrainParams;

typedef struct CvCARTClassifier
{
    CV_CLASSIFIER_FIELDS()
    /* number of internal nodes */
    int count;

    /* internal nodes (each array of <count> elements) */
    int*   compidx;
    float* threshold;
    int*   left;
    int*   right;

    /* leaves (array of <count>+1 elements) */
    float* val;
} CvCARTClassifier;

CV_BOOST_API
void cvGetSortedIndices( CvMat* val, CvMat* idx, int sortcols CV_DEFAULT( 0 ) );

CV_BOOST_API
void cvReleaseStumpClassifier( CvClassifier** classifier );

CV_BOOST_API
float cvEvalStumpClassifier( CvClassifier* classifier, CvMat* sample );

CV_BOOST_API
CvClassifier* cvCreateStumpClassifier( CvMat* trainData,
                                       int flags,
                                       CvMat* trainClasses,
                                       CvMat* typeMask,
                                       CvMat* missedMeasurementsMask CV_DEFAULT(0),
                                       CvMat* compIdx CV_DEFAULT(0),
                                       CvMat* sampleIdx CV_DEFAULT(0),
                                       CvMat* weights CV_DEFAULT(0),
                                       CvClassifierTrainParams* trainParams CV_DEFAULT(0) );

/*
 * cvCreateMTStumpClassifier
 *
 * Multithreaded stump classifier constructor
 * Includes huge train data support through callback function
 */
CV_BOOST_API
CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                                         int flags,
                                         CvMat* trainClasses,
                                         CvMat* typeMask,
                                         CvMat* missedMeasurementsMask,
                                         CvMat* compIdx,
                                         CvMat* sampleIdx,
                                         CvMat* weights,
                                         CvClassifierTrainParams* trainParams );

/*
 * cvCreateCARTClassifier
 *
 * CART classifier constructor
 */
CV_BOOST_API
CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
                                      int flags,
                                      CvMat* trainClasses,
                                      CvMat* typeMask,
                                      CvMat* missedMeasurementsMask,
                                      CvMat* compIdx,
                                      CvMat* sampleIdx,
                                      CvMat* weights,
                                      CvClassifierTrainParams* trainParams );

CV_BOOST_API
void cvReleaseCARTClassifier( CvClassifier** classifier );

CV_BOOST_API
float cvEvalCARTClassifier( CvClassifier* classifier, CvMat* sample );

/****************************************************************************************\
*                                        Boosting                                        *
\****************************************************************************************/

/*
 * CvBoostType
 *
 * The CvBoostType enumeration specifies the boosting type.
 *
 * Remarks
 *   Four different boosting variants for 2 class classification problems are supported:
 *   Discrete AdaBoost, Real AdaBoost, LogitBoost and Gentle AdaBoost.
 *   The L2 (2 class classification problems) and LK (K class classification problems)
 *   algorithms are close to LogitBoost but more numerically stable than the latter.
 *   For regression three different loss functions are supported:
 *   Least square, least absolute deviation and Huber loss.
 */
typedef enum CvBoostType
{
    CV_DABCLASS = 0, /* 2 class Discrete AdaBoost */
    CV_RABCLASS = 1, /* 2 class Real AdaBoost */
    CV_LBCLASS  = 2, /* 2 class LogitBoost */
    CV_GABCLASS = 3, /* 2 class Gentle AdaBoost */
    CV_L2CLASS  = 4, /* classification (2 class problem) */
    CV_LKCLASS  = 5, /* classification (K class problem) */
    CV_LSREG    = 6, /* least squares regression */
    CV_LADREG   = 7, /* least absolute deviation regression */
    CV_MREG     = 8  /* M-regression (Huber loss) */
} CvBoostType;

/****************************************************************************************\
*                             Iterative training functions                              *
\****************************************************************************************/

/*
 * CvBoostTrainer
 *
 * The CvBoostTrainer structure represents internal boosting trainer.
 */
typedef struct CvBoostTrainer CvBoostTrainer;

/*
 * cvBoostStartTraining
 *
 * The cvBoostStartTraining function starts the training process and calculates
 * response values and weights for the first weak classifier training.
 *
 * Parameters
 *   trainClasses
 *     Vector of classes of training samples. Each element must be 0 or 1 and
 *     of type CV_32FC1.
 *   weakTrainVals
 *     Vector of response values for the first trained weak classifier.
 *     Must be of type CV_32FC1.
 *   weights
 *     Weight vector of training samples for the first trained weak classifier.
 *     Must be of type CV_32FC1.
 *   type
 *     Boosting type. CV_DABCLASS, CV_RABCLASS, CV_LBCLASS, CV_GABCLASS
 *     types are supported.
 *
 * Return Values
 *   The return value is a pointer to the internal trainer structure which is used
 *   to perform the next training iterations.
 *
 * Remarks
 *   weakTrainVals and weights must be allocated before calling the function
 *   and be of the same size as trainClasses. Usually weights should be initialized
 *   with the value 1.0.
 *   The function calculates response values and weights for the first weak
 *   classifier training and stores them into weakTrainVals and weights
 *   respectively.
 *   Note, the training of the weak classifier using weakTrainVals, weights,
 *   trainingData is outside of this function.
 */
CV_BOOST_API
CvBoostTrainer* cvBoostStartTraining( CvMat* trainClasses,
                                      CvMat* weakTrainVals,
                                      CvMat* weights,
                                      CvMat* sampleIdx,
                                      CvBoostType type );
/*
 * cvBoostNextWeakClassifier
 *
 * The cvBoostNextWeakClassifier function performs the next training
 * iteration and calculates response values and weights for the next weak
 * classifier training.
 *
 * Parameters
 *   weakEvalVals
 *     Vector of values obtained by evaluation of each sample with
 *     the last trained weak classifier (iteration i). Must be of CV_32FC1 type.
 *   trainClasses
 *     Vector of classes of training samples. Each element must be 0 or 1,
 *     and of type CV_32FC1.
 *   weakTrainVals
 *     Vector of response values for the next weak classifier training
 *     (iteration i+1). Must be of type CV_32FC1.
 *   weights
 *     Weight vector of training samples for the next weak classifier training
 *     (iteration i+1). Must be of type CV_32FC1.
 *   trainer
 *     A pointer to the internal trainer returned by the cvBoostStartTraining
 *     function call.
 *
 * Return Values
 *   The return value is the coefficient for the last trained weak classifier.
 *
 * Remarks
 *   weakTrainVals and weights must be exactly the same vectors as used in
 *   the cvBoostStartTraining function call and should not be modified.
 *   The function calculates response values and weights for the next weak
 *   classifier training and stores them into weakTrainVals and weights
 *   respectively.
 *   Note, the training of the weak classifier of iteration i+1 using
 *   weakTrainVals, weights, trainingData is outside of this function.
 */
CV_BOOST_API
float cvBoostNextWeakClassifier( CvMat* weakEvalVals,
                                 CvMat* trainClasses,
                                 CvMat* weakTrainVals,
                                 CvMat* weights,
                                 CvBoostTrainer* trainer );

/*
 * cvBoostEndTraining
 *
 * The cvBoostEndTraining function finishes the training process and releases
 * internally allocated memory.
 *
 * Parameters
 *   trainer
 *     A pointer to a pointer to the internal trainer returned by the
 *     cvBoostStartTraining function call.
 */
CV_BOOST_API
void cvBoostEndTraining( CvBoostTrainer** trainer );

/****************************************************************************************\
*                                  Boosted tree models                                   *
\****************************************************************************************/

/*
 * CvBtClassifier
 *
 * The CvBtClassifier structure represents a boosted tree model.
 *
 * Members
 *   flags
 *     Flags. If CV_IS_TUNABLE( flags ) != 0 then the model supports tuning.
 *   eval
 *     Evaluation function. Returns the sample's predicted class (0, 1, etc.)
 *     for classification or the predicted value for regression.
 *   tune
 *     Tune function. If the model supports tuning, a tune call performs one
 *     more boosting iteration if the flags parameter passed to the function
 *     is CV_TUNABLE; otherwise it releases the memory internally allocated
 *     for tuning and makes the model untunable.
 *     NOTE: Since tuning uses the pointers to parameters
 *       passed to the cvCreateBtClassifier function, they should not be modified
 *       or released between tune calls.
 *   save
 *     This function stores the model into a given file.
 *   release
 *     This function releases the model.
 *   type
 *     Boosted tree model type.
 *   numclasses
 *     Number of classes for CV_LKCLASS type or 1 for all other types.
 *   numiter
 *     Number of iterations. The number of weak classifiers is equal to the number
 *     of iterations for all types except CV_LKCLASS. For CV_LKCLASS type the
 *     number of weak classifiers is (numiter * numclasses).
 *   numfeatures
 *     Number of features in a sample.
 *   trees
 *     Stores weak classifiers when the model does not support tuning.
 *   seq
 *     Stores weak classifiers when the model supports tuning.
 *   trainer
 *     Pointer to internal tuning parameters if the model supports tuning.
 */
typedef struct CvBtClassifier
{
    CV_CLASSIFIER_FIELDS()

    CvBoostType type;
    int numclasses;
    int numiter;
    int numfeatures;
    union
    {
        CvCARTClassifier** trees;
        CvSeq* seq;
    };
    void* trainer;
} CvBtClassifier;

/*
 * CvBtClassifierTrainParams
 *
 * The CvBtClassifierTrainParams structure stores training parameters for
 * a boosted tree model.
 *
 * Members
 *   type
 *     Boosted tree model type.
 *   numiter
 *     Desired number of iterations.
 *   param
 *     Parameter   Model Type    Parameter Meaning
 *     param[0]    Any           Shrinkage factor
 *     param[1]    CV_MREG       alpha. (1-alpha) determines the "break-down" point of
 *                               the training procedure, i.e. the fraction of samples
 *                               that can be arbitrarily modified without seriously
 *                               degrading the quality of the result.
 *                 CV_DABCLASS,  Weight trimming factor.
 *                 CV_RABCLASS,
 *                 CV_LBCLASS,
 *                 CV_GABCLASS,
 *                 CV_L2CLASS,
 *                 CV_LKCLASS
 *   numsplits
 *     Desired number of splits in each tree.
 */
typedef struct CvBtClassifierTrainParams
{
    CV_CLASSIFIER_TRAIN_PARAM_FIELDS()

    CvBoostType type;
    int numiter;
    float param[2];
    int numsplits;
} CvBtClassifierTrainParams;

/*
 * cvCreateBtClassifier
 *
 * The cvCreateBtClassifier function creates a boosted tree model.
 *
 * Parameters
 *   trainData
 *     Matrix of feature values. Must have CV_32FC1 type.
 *   flags
 *     Determines how samples are stored in trainData.
 *     One of CV_ROW_SAMPLE or CV_COL_SAMPLE.
 *     Optionally may be combined with CV_TUNABLE to make a tunable model.
 *   trainClasses
 *     Vector of responses for regression or classes (0, 1, 2, etc.) for classification.
 *   typeMask,
 *   missedMeasurementsMask,
 *   compIdx
 *     Not supported. Must be NULL.
 *   sampleIdx
 *     Indices of samples used in training. If NULL then all samples are used.
 *     For CV_DABCLASS, CV_RABCLASS, CV_LBCLASS and CV_GABCLASS must be NULL.
 *   weights
 *     Not supported. Must be NULL.
 *   trainParams
 *     A pointer to a CvBtClassifierTrainParams structure. Training parameters.
 *     See the CvBtClassifierTrainParams description for details.
 *
 * Return Values
 *   The return value is a pointer to the created boosted tree model of type CvBtClassifier.
 *
 * Remarks
 *   The function performs trainParams->numiter training iterations.
 *   If the CV_TUNABLE flag is specified then the created model supports tuning.
 *   In this case additional training iterations may be performed by
 *   tune function calls.
 */
CV_BOOST_API
CvClassifier* cvCreateBtClassifier( CvMat* trainData,
                                    int flags,
                                    CvMat* trainClasses,
                                    CvMat* typeMask,
                                    CvMat* missedMeasurementsMask,
                                    CvMat* compIdx,
                                    CvMat* sampleIdx,
                                    CvMat* weights,
                                    CvClassifierTrainParams* trainParams );

/*
 * cvCreateBtClassifierFromFile
 *
 * The cvCreateBtClassifierFromFile function restores a previously saved
 * boosted tree model from a file.
 *
 * Parameters
 *   filename
 *     The name of the file with the boosted tree model.
 *
 * Remarks
 *   The restored model does not support tuning.
 */
CV_BOOST_API
CvClassifier* cvCreateBtClassifierFromFile( const char* filename );

/****************************************************************************************\
*                                   Utility functions                                    *
\****************************************************************************************/

/*
 * cvTrimWeights
 *
 * The cvTrimWeights function performs weight trimming.
 *
 * Parameters
 *   weights
 *     Weights vector.
 *   idx
 *     Indices vector of weights that should be considered.
 *     If it is NULL then all weights are used.
 *   factor
 *     Weight trimming factor. Must be in [0, 1] range.
 *
 * Return Values
 *   The return value is a vector of indices. If all samples should be used then
 *   it is equal to idx. In the other case the cvReleaseMat function should be
 *   called to release it.
 */
CV_BOOST_API
CvMat* cvTrimWeights( CvMat* weights, CvMat* idx, float factor );

/*
 * cvReadTrainData
 *
 * The cvReadTrainData function reads feature values and responses from a file.
 *
 * Parameters
 *   filename
 *     The name of the file to be read.
 *   flags
 *     One of CV_ROW_SAMPLE or CV_COL_SAMPLE. Determines how feature values
 *     will be stored.
 *   trainData
 *     A pointer to a pointer to the created matrix with feature values.
 *     The cvReleaseMat function should be used to destroy the created matrix.
 *   trainClasses
 *     A pointer to a pointer to the created matrix with response values.
 *     The cvReleaseMat function should be used to destroy the created matrix.
 *
 * Remarks
 *   File format:
 *   ============================================
 *   m n
 *   value_1_1 value_1_2 ... value_1_n response_1
 *   value_2_1 value_2_2 ... value_2_n response_2
 *   ...
 *   value_m_1 value_m_2 ... value_m_n response_m
 *   ============================================
 *   m
 *     Number of samples
 *   n
 *     Number of features in each sample
 *   value_i_j
 *     Value of the j-th feature of the i-th sample
 *   response_i
 *     Response value of the i-th sample
 *   For classification problems responses represent classes (0, 1, etc.)
 *   All values and classes are integer or real numbers.
 */
CV_BOOST_API
void cvReadTrainData( const char* filename,
                      int flags,
                      CvMat** trainData,
                      CvMat** trainClasses );


/*
 * cvWriteTrainData
 *
 * The cvWriteTrainData function stores feature values and responses into a file.
 *
 * Parameters
 *   filename
 *     The name of the file.
 *   flags
 *     One of CV_ROW_SAMPLE or CV_COL_SAMPLE. Determines how feature values
 *     are stored.
 *   trainData
 *     Feature values matrix.
 *   trainClasses
 *     Response values vector.
 *   sampleIdx
 *     Vector of indices of the samples that should be stored. If it is NULL
 *     then all samples will be stored.
 *
 * Remarks
 *   See the cvReadTrainData function for the file format description.
 */
CV_BOOST_API
void cvWriteTrainData( const char* filename,
                       int flags,
                       CvMat* trainData,
                       CvMat* trainClasses,
                       CvMat* sampleIdx );

/*
 * cvRandShuffleVec
 *
 * The cvRandShuffleVec function performs random shuffling of a given vector.
 *
 * Parameters
 *   vector
 *     Vector that should be shuffled.
 *     Must have CV_8UC1, CV_16SC1, CV_32SC1 or CV_32FC1 type.
 */
CV_BOOST_API
void cvRandShuffleVec( CvMat* vector );

#endif /* _CVCLASSIFIER_H_ */
@ -1,125 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "_cvcommon.h"

#include <cstring>
#include <ctime>

#include <sys/stat.h>
#include <sys/types.h>
#ifdef _WIN32
#include <direct.h>
#endif /* _WIN32 */

int icvMkDir( const char* filename )
{
    char path[PATH_MAX];
    char* p;
    int pos;

#ifdef _WIN32
    struct _stat st;
#else /* _WIN32 */
    struct stat st;
    mode_t mode;

    mode = 0755;
#endif /* _WIN32 */

    strcpy( path, filename );

    p = path;
    for( ; ; )
    {
        pos = (int)strcspn( p, "/\\" );

        if( pos == (int) strlen( p ) ) break;
        if( pos != 0 )
        {
            p[pos] = '\0';

#ifdef _WIN32
            if( p[pos-1] != ':' )
            {
                if( _stat( path, &st ) != 0 )
                {
                    if( _mkdir( path ) != 0 ) return 0;
                }
            }
#else /* _WIN32 */
            if( stat( path, &st ) != 0 )
            {
                if( mkdir( path, mode ) != 0 ) return 0;
            }
#endif /* _WIN32 */
        }

        p[pos] = '/';

        p += pos + 1;
    }

    return 1;
}
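icvMkDir walks the path component by component and creates every missing parent directory of the given file name; the final component is treated as a file and is not created. A hedged usage sketch, assuming a POSIX-style relative path (the names are placeholders):

    /* creates samples/pos/ if needed; samples/pos/img.vec itself is not created */
    if( !icvMkDir( "samples/pos/img.vec" ) )
        fprintf( stderr, "Unable to create directory hierarchy\n" );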

#if 0
/* debug functions */
void icvSave( const CvArr* ptr, const char* filename, int line )
{
    CvFileStorage* fs;
    char buf[PATH_MAX];
    const char* name;

    name = strrchr( filename, '\\' );
    if( !name ) name = strrchr( filename, '/' );
    if( !name ) name = filename;
    else name++; /* skip '/' or '\\' */

    /* cast to int to match the %d format specifier */
    sprintf( buf, "%s-%d-%d", name, line, (int) time( NULL ) );
    fs = cvOpenFileStorage( buf, NULL, CV_STORAGE_WRITE_TEXT );
    if( !fs ) return;
    cvWrite( fs, "debug", ptr );
    cvReleaseFileStorage( &fs );
}
#endif // #if 0

/* End of file. */
@ -1,835 +0,0 @@
/*
 * cvhaarclassifier.cpp
 *
 * haar classifiers (stump, CART, stage, cascade)
 */

#include "_cvhaartraining.h"


CvIntHaarClassifier* icvCreateCARTHaarClassifier( int count )
{
    CvCARTHaarClassifier* cart;
    size_t datasize;

    datasize = sizeof( *cart ) +
        ( sizeof( int ) +
          sizeof( CvTHaarFeature ) + sizeof( CvFastHaarFeature ) +
          sizeof( float ) + sizeof( int ) + sizeof( int ) ) * count +
        sizeof( float ) * (count + 1);

    cart = (CvCARTHaarClassifier*) cvAlloc( datasize );
    memset( cart, 0, datasize );

    cart->feature = (CvTHaarFeature*) (cart + 1);
    cart->fastfeature = (CvFastHaarFeature*) (cart->feature + count);
    cart->threshold = (float*) (cart->fastfeature + count);
    cart->left = (int*) (cart->threshold + count);
    cart->right = (int*) (cart->left + count);
    cart->val = (float*) (cart->right + count);
    cart->compidx = (int*) (cart->val + count + 1);
    cart->count = count;
    cart->eval = icvEvalCARTHaarClassifier;
    cart->save = icvSaveCARTHaarClassifier;
    cart->release = icvReleaseHaarClassifier;

    return (CvIntHaarClassifier*) cart;
}
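icvCreateCARTHaarClassifier packs all node arrays (features, thresholds, child links, leaf values) into one cvAlloc block laid out directly after the struct, which is why the single cvFree in icvReleaseHaarClassifier releases everything. A minimal self-contained sketch of the same pattern, with hypothetical names:

    #include <stdlib.h>
    #include <string.h>

    typedef struct Tree
    {
        int    count;
        float* threshold;   /* points into the same allocation */
        int*   left;
    } Tree;

    Tree* tree_create( int count )
    {
        size_t sz = sizeof( Tree ) + (sizeof( float ) + sizeof( int )) * count;
        Tree* t = (Tree*) malloc( sz );
        memset( t, 0, sz );
        t->threshold = (float*) (t + 1);              /* floats right after the struct */
        t->left = (int*) (t->threshold + count);      /* ints after the floats         */
        t->count = count;
        return t;                                     /* one free( t ) releases it all */
    }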

void icvReleaseHaarClassifier( CvIntHaarClassifier** classifier )
{
    cvFree( classifier );
    *classifier = NULL;
}


void icvInitCARTHaarClassifier( CvCARTHaarClassifier* carthaar, CvCARTClassifier* cart,
                                CvIntHaarFeatures* intHaarFeatures )
{
    int i;

    for( i = 0; i < cart->count; i++ )
    {
        carthaar->feature[i] = intHaarFeatures->feature[cart->compidx[i]];
        carthaar->fastfeature[i] = intHaarFeatures->fastfeature[cart->compidx[i]];
        carthaar->threshold[i] = cart->threshold[i];
        carthaar->left[i] = cart->left[i];
        carthaar->right[i] = cart->right[i];
        carthaar->val[i] = cart->val[i];
        carthaar->compidx[i] = cart->compidx[i];
    }
    carthaar->count = cart->count;
    carthaar->val[cart->count] = cart->val[cart->count];
}


float icvEvalCARTHaarClassifier( CvIntHaarClassifier* classifier,
                                 sum_type* sum, sum_type* tilted, float normfactor )
{
    int idx = 0;

    do
    {
        if( cvEvalFastHaarFeature(
                ((CvCARTHaarClassifier*) classifier)->fastfeature + idx, sum, tilted )
            < (((CvCARTHaarClassifier*) classifier)->threshold[idx] * normfactor) )
        {
            idx = ((CvCARTHaarClassifier*) classifier)->left[idx];
        }
        else
        {
            idx = ((CvCARTHaarClassifier*) classifier)->right[idx];
        }
    } while( idx > 0 );

    return ((CvCARTHaarClassifier*) classifier)->val[-idx];
}
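The CART traversal above encodes children and leaves in a single int: a positive index is the next internal node, while zero or a negative value -k means "stop and return val[k]", which is why the loop runs while idx > 0 and then returns val[-idx]. For example, in a one-split stump left[0] = 0 and right[0] = -1, so the loop returns val[0] below the threshold and val[1] above it.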

CvIntHaarClassifier* icvCreateStageHaarClassifier( int count, float threshold )
{
    CvStageHaarClassifier* stage;
    size_t datasize;

    datasize = sizeof( *stage ) + sizeof( CvIntHaarClassifier* ) * count;
    stage = (CvStageHaarClassifier*) cvAlloc( datasize );
    memset( stage, 0, datasize );

    stage->count = count;
    stage->threshold = threshold;
    stage->classifier = (CvIntHaarClassifier**) (stage + 1);

    stage->eval = icvEvalStageHaarClassifier;
    stage->save = icvSaveStageHaarClassifier;
    stage->release = icvReleaseStageHaarClassifier;

    return (CvIntHaarClassifier*) stage;
}


void icvReleaseStageHaarClassifier( CvIntHaarClassifier** classifier )
{
    int i;

    for( i = 0; i < ((CvStageHaarClassifier*) *classifier)->count; i++ )
    {
        if( ((CvStageHaarClassifier*) *classifier)->classifier[i] != NULL )
        {
            ((CvStageHaarClassifier*) *classifier)->classifier[i]->release(
                &(((CvStageHaarClassifier*) *classifier)->classifier[i]) );
        }
    }

    cvFree( classifier );
    *classifier = NULL;
}


float icvEvalStageHaarClassifier( CvIntHaarClassifier* classifier,
                                  sum_type* sum, sum_type* tilted, float normfactor )
{
    int i;
    float stage_sum;

    stage_sum = 0.0F;
    for( i = 0; i < ((CvStageHaarClassifier*) classifier)->count; i++ )
    {
        stage_sum +=
            ((CvStageHaarClassifier*) classifier)->classifier[i]->eval(
                ((CvStageHaarClassifier*) classifier)->classifier[i],
                sum, tilted, normfactor );
    }

    return stage_sum;
}


CvIntHaarClassifier* icvCreateCascadeHaarClassifier( int count )
{
    CvCascadeHaarClassifier* ptr;
    size_t datasize;

    datasize = sizeof( *ptr ) + sizeof( CvIntHaarClassifier* ) * count;
    ptr = (CvCascadeHaarClassifier*) cvAlloc( datasize );
    memset( ptr, 0, datasize );

    ptr->count = count;
    ptr->classifier = (CvIntHaarClassifier**) (ptr + 1);

    ptr->eval = icvEvalCascadeHaarClassifier;
    ptr->save = NULL;
    ptr->release = icvReleaseCascadeHaarClassifier;

    return (CvIntHaarClassifier*) ptr;
}


void icvReleaseCascadeHaarClassifier( CvIntHaarClassifier** classifier )
{
    int i;

    for( i = 0; i < ((CvCascadeHaarClassifier*) *classifier)->count; i++ )
    {
        if( ((CvCascadeHaarClassifier*) *classifier)->classifier[i] != NULL )
        {
            ((CvCascadeHaarClassifier*) *classifier)->classifier[i]->release(
                &(((CvCascadeHaarClassifier*) *classifier)->classifier[i]) );
        }
    }

    cvFree( classifier );
    *classifier = NULL;
}


float icvEvalCascadeHaarClassifier( CvIntHaarClassifier* classifier,
                                    sum_type* sum, sum_type* tilted, float normfactor )
{
    int i;

    for( i = 0; i < ((CvCascadeHaarClassifier*) classifier)->count; i++ )
    {
        if( ((CvCascadeHaarClassifier*) classifier)->classifier[i]->eval(
                ((CvCascadeHaarClassifier*) classifier)->classifier[i],
                sum, tilted, normfactor )
            < ( ((CvStageHaarClassifier*)
                 ((CvCascadeHaarClassifier*) classifier)->classifier[i])->threshold
                - CV_THRESHOLD_EPS) )
        {
            return 0.0;
        }
    }

    return 1.0;
}


void icvSaveHaarFeature( CvTHaarFeature* feature, FILE* file )
{
    fprintf( file, "%d\n", ( ( feature->rect[2].weight == 0.0F ) ? 2 : 3) );
    fprintf( file, "%d %d %d %d %d %d\n",
             feature->rect[0].r.x,
             feature->rect[0].r.y,
             feature->rect[0].r.width,
             feature->rect[0].r.height,
             0,
             (int) (feature->rect[0].weight) );
    fprintf( file, "%d %d %d %d %d %d\n",
             feature->rect[1].r.x,
             feature->rect[1].r.y,
             feature->rect[1].r.width,
             feature->rect[1].r.height,
             0,
             (int) (feature->rect[1].weight) );
    if( feature->rect[2].weight != 0.0F )
    {
        fprintf( file, "%d %d %d %d %d %d\n",
                 feature->rect[2].r.x,
                 feature->rect[2].r.y,
                 feature->rect[2].r.width,
                 feature->rect[2].r.height,
                 0,
                 (int) (feature->rect[2].weight) );
    }
    fprintf( file, "%s\n", &(feature->desc[0]) );
}


void icvLoadHaarFeature( CvTHaarFeature* feature, FILE* file )
{
    int nrect;
    int j;
    int tmp;
    int weight;

    nrect = 0;
    int values_read = fscanf( file, "%d", &nrect );
    CV_Assert(values_read == 1);

    assert( nrect <= CV_HAAR_FEATURE_MAX );

    for( j = 0; j < nrect; j++ )
    {
        values_read = fscanf( file, "%d %d %d %d %d %d",
                              &(feature->rect[j].r.x),
                              &(feature->rect[j].r.y),
                              &(feature->rect[j].r.width),
                              &(feature->rect[j].r.height),
                              &tmp, &weight );
        CV_Assert(values_read == 6);
        feature->rect[j].weight = (float) weight;
    }
    for( j = nrect; j < CV_HAAR_FEATURE_MAX; j++ )
    {
        feature->rect[j].r.x = 0;
        feature->rect[j].r.y = 0;
        feature->rect[j].r.width = 0;
        feature->rect[j].r.height = 0;
        feature->rect[j].weight = 0.0f;
    }
    values_read = fscanf( file, "%s", &(feature->desc[0]) );
    CV_Assert(values_read == 1);
    feature->tilted = ( feature->desc[0] == 't' );
}


void icvSaveCARTHaarClassifier( CvIntHaarClassifier* classifier, FILE* file )
{
    int i;
    int count;

    count = ((CvCARTHaarClassifier*) classifier)->count;
    fprintf( file, "%d\n", count );
    for( i = 0; i < count; i++ )
    {
        icvSaveHaarFeature( &(((CvCARTHaarClassifier*) classifier)->feature[i]), file );
        fprintf( file, "%e %d %d\n",
                 ((CvCARTHaarClassifier*) classifier)->threshold[i],
                 ((CvCARTHaarClassifier*) classifier)->left[i],
                 ((CvCARTHaarClassifier*) classifier)->right[i] );
    }
    for( i = 0; i <= count; i++ )
    {
        fprintf( file, "%e ", ((CvCARTHaarClassifier*) classifier)->val[i] );
    }
    fprintf( file, "\n" );
}


CvIntHaarClassifier* icvLoadCARTHaarClassifier( FILE* file, int step )
{
    CvCARTHaarClassifier* ptr;
    int i;
    int count;

    ptr = NULL;
    int values_read = fscanf( file, "%d", &count );
    CV_Assert(values_read == 1);

    if( count > 0 )
    {
        ptr = (CvCARTHaarClassifier*) icvCreateCARTHaarClassifier( count );
        for( i = 0; i < count; i++ )
        {
            icvLoadHaarFeature( &(ptr->feature[i]), file );
            values_read = fscanf( file, "%f %d %d", &(ptr->threshold[i]), &(ptr->left[i]),
                                  &(ptr->right[i]) );
            CV_Assert(values_read == 3);
        }
        for( i = 0; i <= count; i++ )
        {
            values_read = fscanf( file, "%f", &(ptr->val[i]) );
            CV_Assert(values_read == 1);
        }
        icvConvertToFastHaarFeature( ptr->feature, ptr->fastfeature, ptr->count, step );
    }

    return (CvIntHaarClassifier*) ptr;
}


void icvSaveStageHaarClassifier( CvIntHaarClassifier* classifier, FILE* file )
{
    int count;
    int i;
    float threshold;

    count = ((CvStageHaarClassifier*) classifier)->count;
    fprintf( file, "%d\n", count );
    for( i = 0; i < count; i++ )
    {
        ((CvStageHaarClassifier*) classifier)->classifier[i]->save(
            ((CvStageHaarClassifier*) classifier)->classifier[i], file );
    }

    threshold = ((CvStageHaarClassifier*) classifier)->threshold;

    /* to be compatible with the previous implementation */
    /* threshold = 2.0F * ((CvStageHaarClassifier*) classifier)->threshold - count; */

    fprintf( file, "%e\n", threshold );
}


static CvIntHaarClassifier* icvLoadCARTStageHaarClassifierF( FILE* file, int step )
{
    CvStageHaarClassifier* ptr = NULL;

    //CV_FUNCNAME( "icvLoadCARTStageHaarClassifierF" );

    __BEGIN__;

    if( file != NULL )
    {
        int count;
        int i;
        float threshold;

        count = 0;
        int values_read = fscanf( file, "%d", &count );
        CV_Assert(values_read == 1);
        if( count > 0 )
        {
            ptr = (CvStageHaarClassifier*) icvCreateStageHaarClassifier( count, 0.0F );
            for( i = 0; i < count; i++ )
            {
                ptr->classifier[i] = icvLoadCARTHaarClassifier( file, step );
            }

            values_read = fscanf( file, "%f", &threshold );
            CV_Assert(values_read == 1);

            ptr->threshold = threshold;
            /* to be compatible with the previous implementation */
            /* ptr->threshold = 0.5F * (threshold + count); */
        }
        /* guard against a NULL ptr when count <= 0 */
        if( ptr && feof( file ) )
        {
            ptr->release( (CvIntHaarClassifier**) &ptr );
            ptr = NULL;
        }
    }

    __END__;

    return (CvIntHaarClassifier*) ptr;
}


CvIntHaarClassifier* icvLoadCARTStageHaarClassifier( const char* filename, int step )
{
    CvIntHaarClassifier* ptr = NULL;

    CV_FUNCNAME( "icvLoadCARTStageHaarClassifier" );

    __BEGIN__;

    FILE* file;

    file = fopen( filename, "r" );
    if( file )
    {
        CV_CALL( ptr = icvLoadCARTStageHaarClassifierF( file, step ) );
        fclose( file );
    }

    __END__;

    return ptr;
}

/* tree cascade classifier */

/* evaluates a tree cascade classifier */

float icvEvalTreeCascadeClassifier( CvIntHaarClassifier* classifier,
                                    sum_type* sum, sum_type* tilted, float normfactor )
{
    CvTreeCascadeNode* ptr;

    ptr = ((CvTreeCascadeClassifier*) classifier)->root;

    while( ptr )
    {
        if( ptr->stage->eval( (CvIntHaarClassifier*) ptr->stage,
                              sum, tilted, normfactor )
            >= ptr->stage->threshold - CV_THRESHOLD_EPS )
        {
            ptr = ptr->child;
        }
        else
        {
            while( ptr && ptr->next == NULL ) ptr = ptr->parent;
            if( ptr == NULL ) return 0.0F;
            ptr = ptr->next;
        }
    }

    return 1.0F;
}
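The traversal above walks the tree of stages: passing a stage descends to its child branch, while failing one backtracks to the nearest ancestor that still has an untried sibling (next) and continues there. Falling off the root on a failure rejects the window (0.0F); passing all stages down to a leaf, so that child becomes NULL, accepts it (1.0F).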

/* sets the path in the tree from the root to the leaf node */

void icvSetLeafNode( CvTreeCascadeClassifier* tcc, CvTreeCascadeNode* leaf )
{
    CV_FUNCNAME( "icvSetLeafNode" );

    __BEGIN__;

    CvTreeCascadeNode* ptr;

    ptr = NULL;
    while( leaf )
    {
        leaf->child_eval = ptr;
        ptr = leaf;
        leaf = leaf->parent;
    }

    leaf = tcc->root;
    while( leaf && leaf != ptr ) leaf = leaf->next;
    if( leaf != ptr )
        CV_ERROR( CV_StsError, "Invalid tcc or leaf node." );

    tcc->root_eval = ptr;

    __END__;
}

/* evaluates a tree cascade classifier. used in filtering */

float icvEvalTreeCascadeClassifierFilter( CvIntHaarClassifier* classifier, sum_type* sum,
                                          sum_type* tilted, float normfactor )
{
    CvTreeCascadeNode* ptr;
    //CvTreeCascadeClassifier* tree;

    //tree = (CvTreeCascadeClassifier*) classifier;

    ptr = ((CvTreeCascadeClassifier*) classifier)->root_eval;
    while( ptr )
    {
        if( ptr->stage->eval( (CvIntHaarClassifier*) ptr->stage,
                              sum, tilted, normfactor )
            < ptr->stage->threshold - CV_THRESHOLD_EPS )
        {
            return 0.0F;
        }
        ptr = ptr->child_eval;
    }

    return 1.0F;
}

/* creates a tree cascade node */

CvTreeCascadeNode* icvCreateTreeCascadeNode()
{
    CvTreeCascadeNode* ptr = NULL;

    CV_FUNCNAME( "icvCreateTreeCascadeNode" );

    __BEGIN__;
    size_t data_size;

    data_size = sizeof( *ptr );
    CV_CALL( ptr = (CvTreeCascadeNode*) cvAlloc( data_size ) );
    memset( ptr, 0, data_size );

    __END__;

    return ptr;
}

/* releases all tree cascade nodes accessible via links */

void icvReleaseTreeCascadeNodes( CvTreeCascadeNode** node )
{
    //CV_FUNCNAME( "icvReleaseTreeCascadeNodes" );

    __BEGIN__;

    if( node && *node )
    {
        CvTreeCascadeNode* ptr;
        CvTreeCascadeNode* ptr_;

        ptr = *node;

        while( ptr )
        {
            while( ptr->child ) ptr = ptr->child;

            if( ptr->stage ) ptr->stage->release( (CvIntHaarClassifier**) &ptr->stage );
            ptr_ = ptr;

            while( ptr && ptr->next == NULL ) ptr = ptr->parent;
            if( ptr ) ptr = ptr->next;

            cvFree( &ptr_ );
        }
    }

    __END__;
}


/* releases a tree cascade classifier */

void icvReleaseTreeCascadeClassifier( CvIntHaarClassifier** classifier )
{
    if( classifier && *classifier )
    {
        icvReleaseTreeCascadeNodes( &((CvTreeCascadeClassifier*) *classifier)->root );
        cvFree( classifier );
        *classifier = NULL;
    }
}


void icvPrintTreeCascade( CvTreeCascadeNode* root )
{
    //CV_FUNCNAME( "icvPrintTreeCascade" );

    __BEGIN__;

    CvTreeCascadeNode* node;
    CvTreeCascadeNode* n;
    char buf0[256];
    char buf[256];
    int level;
    int i;
    int max_level;

    node = root;
    level = max_level = 0;
    while( node )
    {
        while( node->child ) { node = node->child; level++; }
        if( level > max_level ) { max_level = level; }
        while( node && !node->next ) { node = node->parent; level--; }
        if( node ) node = node->next;
    }

    printf( "\nTree Classifier\n" );
    printf( "Stage\n" );
    for( i = 0; i <= max_level; i++ ) printf( "+---" );
    printf( "+\n" );
    for( i = 0; i <= max_level; i++ ) printf( "|%3d", i );
    printf( "|\n" );
    for( i = 0; i <= max_level; i++ ) printf( "+---" );
    printf( "+\n\n" );

    node = root;

    buf[0] = 0;
    while( node )
    {
        sprintf( buf + strlen( buf ), "%3d", node->idx );
        while( node->child )
        {
            node = node->child;
            sprintf( buf + strlen( buf ),
                ((node->idx < 10) ? "---%d" : ((node->idx < 100) ? "--%d" : "-%d")),
                node->idx );
        }
        printf( " %s\n", buf );

        while( node && !node->next ) { node = node->parent; }
        if( node )
        {
            node = node->next;

            n = node->parent;
            buf[0] = 0;
            while( n )
            {
                if( n->next )
                    sprintf( buf0, "  | %s", buf );
                else
                    sprintf( buf0, "    %s", buf );
                strcpy( buf, buf0 );
                n = n->parent;
            }
            printf( " %s |\n", buf );
        }
    }
    printf( "\n" );
    fflush( stdout );

    __END__;
}


CvIntHaarClassifier* icvLoadTreeCascadeClassifier( const char* filename, int step,
                                                   int* splits )
{
    CvTreeCascadeClassifier* ptr = NULL;
    CvTreeCascadeNode** nodes = NULL;

    CV_FUNCNAME( "icvLoadTreeCascadeClassifier" );

    __BEGIN__;

    size_t data_size;
    CvStageHaarClassifier* stage;
    char stage_name[PATH_MAX];
    char* suffix;
    int i, num;
    FILE* f;
    int result, parent=0, next=0;
    int stub;

    if( !splits ) splits = &stub;

    *splits = 0;

    data_size = sizeof( *ptr );

    CV_CALL( ptr = (CvTreeCascadeClassifier*) cvAlloc( data_size ) );
    memset( ptr, 0, data_size );

    ptr->eval = icvEvalTreeCascadeClassifier;
    ptr->release = icvReleaseTreeCascadeClassifier;

    sprintf( stage_name, "%s/", filename );
    suffix = stage_name + strlen( stage_name );

    /* count the stage directories: <filename>/0, <filename>/1, ... */
    for( i = 0; ; i++ )
    {
        sprintf( suffix, "%d/%s", i, CV_STAGE_CART_FILE_NAME );
        f = fopen( stage_name, "r" );
        if( !f ) break;
        fclose( f );
    }
    num = i;

    if( num < 1 ) EXIT;

    data_size = sizeof( *nodes ) * num;
    CV_CALL( nodes = (CvTreeCascadeNode**) cvAlloc( data_size ) );

    for( i = 0; i < num; i++ )
    {
        sprintf( suffix, "%d/%s", i, CV_STAGE_CART_FILE_NAME );
        f = fopen( stage_name, "r" );
        CV_CALL( stage = (CvStageHaarClassifier*)
            icvLoadCARTStageHaarClassifierF( f, step ) );

        result = ( f && stage ) ? fscanf( f, "%d%d", &parent, &next ) : 0;
        if( f ) fclose( f );

        if( result != 2 )
        {
            num = i;
            break;
        }

        printf( "Stage %d loaded\n", i );

        if( parent >= i || (next != -1 && next != i + 1) )
            CV_ERROR( CV_StsError, "Invalid tree links" );

        CV_CALL( nodes[i] = icvCreateTreeCascadeNode() );
        nodes[i]->stage = stage;
        nodes[i]->idx = i;
        nodes[i]->parent = (parent != -1 ) ? nodes[parent] : NULL;
        /* a non-NULL value here is only a marker; it is replaced in the loop below */
        nodes[i]->next = ( next != -1 ) ? nodes[i] : NULL;
        nodes[i]->child = NULL;
    }
    for( i = 0; i < num; i++ )
    {
        if( nodes[i]->next )
        {
            (*splits)++;
            nodes[i]->next = nodes[i+1];
        }
        if( nodes[i]->parent && nodes[i]->parent->child == NULL )
        {
            nodes[i]->parent->child = nodes[i];
        }
    }
    ptr->root = nodes[0];
    ptr->next_idx = num;

    __END__;

    cvFree( &nodes );

    return (CvIntHaarClassifier*) ptr;
}


CvTreeCascadeNode* icvFindDeepestLeaves( CvTreeCascadeClassifier* tcc )
{
    CvTreeCascadeNode* leaves;

    //CV_FUNCNAME( "icvFindDeepestLeaves" );

    __BEGIN__;

    int level, cur_level;
    CvTreeCascadeNode* ptr;
    CvTreeCascadeNode* last;

    leaves = last = NULL;

    ptr = tcc->root;
    level = -1;
    cur_level = 0;

    /* find leaves with the maximal level */
    while( ptr )
    {
        if( ptr->child ) { ptr = ptr->child; cur_level++; }
        else
        {
            if( cur_level == level )
            {
                last->next_same_level = ptr;
                ptr->next_same_level = NULL;
                last = ptr;
            }
            if( cur_level > level )
            {
                level = cur_level;
                leaves = last = ptr;
                ptr->next_same_level = NULL;
            }
            while( ptr && ptr->next == NULL ) { ptr = ptr->parent; cur_level--; }
            if( ptr ) ptr = ptr->next;
        }
    }

    __END__;

    return leaves;
}

/* End of file. */
@ -1,192 +0,0 @@
/*
 * cvhaartraining.h
 *
 * haar training functions
 */

#ifndef _CVHAARTRAINING_H_
#define _CVHAARTRAINING_H_

/*
 * cvCreateTrainingSamples
 *
 * Create training samples applying random distortions to a sample image and
 * store them in a .vec file
 *
 * filename        - .vec file name
 * imgfilename     - sample image file name
 * bgcolor         - background color for the sample image
 * bgthreshold     - background color threshold. Pixels whose colors are in the range
 *                   [bgcolor-bgthreshold, bgcolor+bgthreshold] are considered transparent
 * bgfilename      - background description file name. If not NULL, samples
 *                   will be put on an arbitrary background
 * count           - desired number of samples
 * invert          - if not 0, sample foreground pixels will be inverted;
 *                   if invert == CV_RANDOM_INVERT then samples will be inverted randomly
 * maxintensitydev - desired max intensity deviation of foreground sample pixels
 * maxxangle       - max rotation angles
 * maxyangle
 * maxzangle
 * showsamples     - if not 0, samples will be shown
 * winwidth        - desired sample width
 * winheight       - desired sample height
 */
#define CV_RANDOM_INVERT 0x7FFFFFFF

void cvCreateTrainingSamples( const char* filename,
                              const char* imgfilename, int bgcolor, int bgthreshold,
                              const char* bgfilename, int count,
                              int invert = 0, int maxintensitydev = 40,
                              double maxxangle = 1.1,
                              double maxyangle = 1.1,
                              double maxzangle = 0.5,
                              int showsamples = 0,
                              int winwidth = 24, int winheight = 24 );

void cvCreateTestSamples( const char* infoname,
                          const char* imgfilename, int bgcolor, int bgthreshold,
                          const char* bgfilename, int count,
                          int invert, int maxintensitydev,
                          double maxxangle, double maxyangle, double maxzangle,
                          int showsamples,
                          int winwidth, int winheight );

/*
 * cvCreateTrainingSamplesFromInfo
 *
 * Create training samples from a set of marked up images and store them into a .vec file
 * infoname    - file in which the marked up image descriptions are stored
 * num         - desired number of samples
 * showsamples - if not 0, samples will be shown
 * winwidth    - sample width
 * winheight   - sample height
 *
 * Returns the number of successfully created samples
 */
int cvCreateTrainingSamplesFromInfo( const char* infoname, const char* vecfilename,
                                     int num,
                                     int showsamples,
                                     int winwidth, int winheight );

/*
 * cvShowVecSamples
 *
 * Shows samples stored in a .vec file
 *
 * filename
 *   .vec file name
 * winwidth
 *   sample width
 * winheight
 *   sample height
 * scale
 *   the scale each sample is adjusted to
 */
void cvShowVecSamples( const char* filename, int winwidth, int winheight, double scale );


/*
 * cvCreateCascadeClassifier
 *
 * Create a cascade classifier
 * dirname          - directory name in which the cascade classifier will be created.
 *                    It must exist and contain subdirectories 0, 1, 2, ... (nstages-1).
 * vecfilename      - name of the .vec file with the object's images
 * bgfilename       - name of the background description file
 * bg_vecfile       - true if bgfilename represents a vec file with discrete negatives
 * npos             - number of positive samples used in training of each stage
 * nneg             - number of negative samples used in training of each stage
 * nstages          - number of stages
 * numprecalculated - number of features being precalculated. Each precalculated feature
 *                    requires (number_of_samples*(sizeof( float ) + sizeof( short ))) bytes of memory
 * numsplits        - number of binary splits in each weak classifier:
 *                    1 - stumps, 2 and more - trees
 * minhitrate       - desired min hit rate of each stage
 * maxfalsealarm    - desired max false alarm of each stage
 * weightfraction   - weight trimming parameter
 * mode             - 0 - BASIC = Viola
 *                    1 - CORE  = all upright
 *                    2 - ALL   = all features
 * symmetric        - if not 0, vertical symmetry is assumed
 * equalweights     - if not 0, initial weights of all samples will be equal
 * winwidth         - sample width
 * winheight        - sample height
 * boosttype        - type of applied boosting algorithm:
 *                    0 - Discrete AdaBoost
 *                    1 - Real AdaBoost
 *                    2 - LogitBoost
 *                    3 - Gentle AdaBoost
 * stumperror       - type of error used if the Discrete AdaBoost algorithm is applied:
 *                    0 - misclassification error
 *                    1 - gini error
 *                    2 - entropy error
 */
void cvCreateCascadeClassifier( const char* dirname,
                                const char* vecfilename,
                                const char* bgfilename,
                                int npos, int nneg, int nstages,
                                int numprecalculated,
                                int numsplits,
                                float minhitrate = 0.995F, float maxfalsealarm = 0.5F,
                                float weightfraction = 0.95F,
                                int mode = 0, int symmetric = 1,
                                int equalweights = 1,
                                int winwidth = 24, int winheight = 24,
                                int boosttype = 3, int stumperror = 0 );

void cvCreateTreeCascadeClassifier( const char* dirname,
                                    const char* vecfilename,
                                    const char* bgfilename,
                                    int npos, int nneg, int nstages,
                                    int numprecalculated,
                                    int numsplits,
                                    float minhitrate, float maxfalsealarm,
                                    float weightfraction,
                                    int mode, int symmetric,
                                    int equalweights,
                                    int winwidth, int winheight,
                                    int boosttype, int stumperror,
                                    int maxtreesplits, int minpos, bool bg_vecfile = false );

#endif /* _CVHAARTRAINING_H_ */
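A hedged usage sketch of the sample-creation API declared above, with hypothetical file names (positive.png, bg.txt and samples.vec are placeholders):

    /* 1000 distorted copies of positive.png, pasted onto the backgrounds
       listed in bg.txt, written as 24x24 samples into samples.vec */
    cvCreateTrainingSamples( "samples.vec", "positive.png",
                             0 /* bgcolor */, 80 /* bgthreshold */,
                             "bg.txt", 1000 );

    /* inspect the result at 4x scale */
    cvShowVecSamples( "samples.vec", 24, 24, 4.0 );

The remaining parameters keep the declared defaults (maxintensitydev = 40, maxxangle = maxyangle = 1.1, maxzangle = 0.5, winwidth = winheight = 24).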
@ -1,953 +0,0 @@
/*
 * cvsamples.cpp
 *
 * support functions for training and test samples creation.
 */

#include "cvhaartraining.h"
#include "_cvhaartraining.h"

/* if the ipl.h file is included then the iplWarpPerspectiveQ function
   is used for image transformation during samples creation;
   otherwise the internal cvWarpPerspective function is used */

//#include <ipl.h>

#include "cv.h"
#include "highgui.h"

/* Calculates the coefficients of the perspective transformation
 * which maps <quad> into the rectangle ((0,0), (w,0), (w,h), (0,h)):
 *
 *      c00*xi + c01*yi + c02
 * ui = ---------------------
 *      c20*xi + c21*yi + c22
 *
 *      c10*xi + c11*yi + c12
 * vi = ---------------------
 *      c20*xi + c21*yi + c22
 *
 * Coefficients are calculated by solving the linear system:
 * / x0 y0  1  0  0  0 -x0*u0 -y0*u0 \ /c00\ /u0\
 * | x1 y1  1  0  0  0 -x1*u1 -y1*u1 | |c01| |u1|
 * | x2 y2  1  0  0  0 -x2*u2 -y2*u2 | |c02| |u2|
 * | x3 y3  1  0  0  0 -x3*u3 -y3*u3 |.|c10|=|u3|,
 * |  0  0  0 x0 y0  1 -x0*v0 -y0*v0 | |c11| |v0|
 * |  0  0  0 x1 y1  1 -x1*v1 -y1*v1 | |c12| |v1|
 * |  0  0  0 x2 y2  1 -x2*v2 -y2*v2 | |c20| |v2|
 * \  0  0  0 x3 y3  1 -x3*v3 -y3*v3 / \c21/ \v3/
 *
 * where:
 *   (xi, yi) = (quad[i][0], quad[i][1])
 *   cij      - coeffs[i][j], coeffs[2][2] = 1
 *   (ui, vi) - rectangle vertices
 */
static void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
                                       double coeffs[3][3] )
{
    //CV_FUNCNAME( "cvGetPerspectiveTransform" );

    __BEGIN__;

    double a[8][8];
    double b[8];

    CvMat A = cvMat( 8, 8, CV_64FC1, a );
    CvMat B = cvMat( 8, 1, CV_64FC1, b );
    CvMat X = cvMat( 8, 1, CV_64FC1, coeffs );

    int i;
    for( i = 0; i < 4; ++i )
    {
        a[i][0] = quad[i][0]; a[i][1] = quad[i][1]; a[i][2] = 1;
        a[i][3] = a[i][4] = a[i][5] = a[i][6] = a[i][7] = 0;
        b[i] = 0;
    }
    for( i = 4; i < 8; ++i )
    {
        a[i][3] = quad[i-4][0]; a[i][4] = quad[i-4][1]; a[i][5] = 1;
        a[i][0] = a[i][1] = a[i][2] = a[i][6] = a[i][7] = 0;
        b[i] = 0;
    }

    int u = src_size.width - 1;
    int v = src_size.height - 1;

    a[1][6] = -quad[1][0] * u; a[1][7] = -quad[1][1] * u;
    a[2][6] = -quad[2][0] * u; a[2][7] = -quad[2][1] * u;
    b[1] = b[2] = u;

    a[6][6] = -quad[2][0] * v; a[6][7] = -quad[2][1] * v;
    a[7][6] = -quad[3][0] * v; a[7][7] = -quad[3][1] * v;
    b[6] = b[7] = v;

    cvSolve( &A, &B, &X );

    coeffs[2][2] = 1;

    __END__;
}
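As a sanity check on the solved system, mapping the source corners through the coefficients should reproduce the rectangle corners. A minimal sketch, assuming <assert.h> and <math.h> (it would have to live in this translation unit, since the function is static):

    double quad[4][2] = { {0, 0}, {639, 0}, {639, 479}, {0, 479} };
    double c[3][3];
    cvGetPerspectiveTransform( cvSize( 640, 480 ), quad, c );
    /* vertex 2 = (639, 479) should map to (w-1, h-1) = (639, 479) */
    double div = c[2][0]*639 + c[2][1]*479 + c[2][2];
    double u = (c[0][0]*639 + c[0][1]*479 + c[0][2]) / div;
    double v = (c[1][0]*639 + c[1][1]*479 + c[1][2]) / div;
    assert( fabs( u - 639 ) < 1e-6 && fabs( v - 479 ) < 1e-6 );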
|
||||
|
||||
/* Warps source into destination by a perspective transform */
|
||||
static void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
|
||||
{
|
||||
CV_FUNCNAME( "cvWarpPerspective" );
|
||||
|
||||
__BEGIN__;
|
||||
|
||||
#ifdef __IPL_H__
|
||||
IplImage src_stub, dst_stub;
|
||||
IplImage* src_img;
|
||||
IplImage* dst_img;
|
||||
CV_CALL( src_img = cvGetImage( src, &src_stub ) );
|
||||
CV_CALL( dst_img = cvGetImage( dst, &dst_stub ) );
|
||||
iplWarpPerspectiveQ( src_img, dst_img, quad, IPL_WARP_R_TO_Q,
|
||||
IPL_INTER_CUBIC | IPL_SMOOTH_EDGE );
|
||||
#else
|
||||
|
||||
int fill_value = 0;
|
||||
|
||||
double c[3][3]; /* transformation coefficients */
|
||||
double q[4][2]; /* rearranged quad */
|
||||
|
||||
int left = 0;
|
||||
int right = 0;
|
||||
int next_right = 0;
|
||||
int next_left = 0;
|
||||
double y_min = 0;
|
||||
double y_max = 0;
|
||||
double k_left, b_left, k_right, b_right;
|
||||
|
||||
uchar* src_data;
|
||||
int src_step;
|
||||
CvSize src_size;
|
||||
|
||||
uchar* dst_data;
|
||||
int dst_step;
|
||||
CvSize dst_size;
|
||||
|
||||
double d = 0;
|
||||
int direction = 0;
|
||||
int i;
|
||||
|
||||
if( !src || (!CV_IS_IMAGE( src ) && !CV_IS_MAT( src )) ||
|
||||
cvGetElemType( src ) != CV_8UC1 ||
|
||||
cvGetDims( src ) != 2 )
|
||||
{
|
||||
CV_ERROR( CV_StsBadArg,
|
||||
"Source must be two-dimensional array of CV_8UC1 type." );
|
||||
}
|
||||
if( !dst || (!CV_IS_IMAGE( dst ) && !CV_IS_MAT( dst )) ||
|
||||
cvGetElemType( dst ) != CV_8UC1 ||
|
||||
cvGetDims( dst ) != 2 )
|
||||
{
|
||||
CV_ERROR( CV_StsBadArg,
|
||||
"Destination must be two-dimensional array of CV_8UC1 type." );
|
||||
}
|
||||
|
||||
CV_CALL( cvGetRawData( src, &src_data, &src_step, &src_size ) );
|
||||
CV_CALL( cvGetRawData( dst, &dst_data, &dst_step, &dst_size ) );
|
||||
|
||||
CV_CALL( cvGetPerspectiveTransform( src_size, quad, c ) );
|
||||
|
||||
/* if direction > 0 then vertices in quad follow in a CW direction,
|
||||
otherwise they follow in a CCW direction */
|
||||
direction = 0;
|
||||
for( i = 0; i < 4; ++i )
|
||||
{
|
||||
int ni = i + 1; if( ni == 4 ) ni = 0;
|
||||
int pi = i - 1; if( pi == -1 ) pi = 3;
|
||||
|
||||
d = (quad[i][0] - quad[pi][0])*(quad[ni][1] - quad[i][1]) -
|
||||
(quad[i][1] - quad[pi][1])*(quad[ni][0] - quad[i][0]);
|
||||
int cur_direction = CV_SIGN(d);
|
||||
if( direction == 0 )
|
||||
{
|
||||
direction = cur_direction;
|
||||
}
|
||||
else if( direction * cur_direction < 0 )
|
||||
{
|
||||
direction = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if( direction == 0 )
|
||||
{
|
||||
CV_ERROR( CV_StsBadArg, "Quadrangle is nonconvex or degenerated." );
|
||||
}
|
||||
|
||||
/* <left> is the index of the topmost quad vertice
|
||||
if there are two such vertices <left> is the leftmost one */
|
||||
left = 0;
|
||||
for( i = 1; i < 4; ++i )
|
||||
{
|
||||
if( (quad[i][1] < quad[left][1]) ||
|
||||
((quad[i][1] == quad[left][1]) && (quad[i][0] < quad[left][0])) )
|
||||
{
|
||||
left = i;
|
||||
}
|
||||
}
|
||||
/* rearrange <quad> vertices in such way that they follow in a CW
|
||||
direction and the first vertice is the topmost one and put them
|
||||
into <q> */
|
||||
if( direction > 0 )
|
||||
{
|
||||
for( i = left; i < 4; ++i )
|
||||
{
|
||||
q[i-left][0] = quad[i][0];
|
||||
q[i-left][1] = quad[i][1];
|
||||
}
|
||||
for( i = 0; i < left; ++i )
|
||||
{
|
||||
q[4-left+i][0] = quad[i][0];
|
||||
q[4-left+i][1] = quad[i][1];
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for( i = left; i >= 0; --i )
|
||||
{
|
||||
q[left-i][0] = quad[i][0];
|
||||
q[left-i][1] = quad[i][1];
|
||||
}
|
||||
for( i = 3; i > left; --i )
|
||||
{
|
||||
q[4+left-i][0] = quad[i][0];
|
||||
q[4+left-i][1] = quad[i][1];
|
||||
}
|
||||
}
|
||||
|
||||
left = right = 0;
|
||||
/* if there are two topmost points, <right> is the index of the rightmost one
|
||||
otherwise <right> */
|
||||
if( q[left][1] == q[left+1][1] )
|
||||
{
|
||||
right = 1;
|
||||
}
|
||||
|
||||
/* <next_left> follows <left> in a CCW direction */
|
||||
next_left = 3;
|
||||
/* <next_right> follows <right> in a CW direction */
|
||||
next_right = right + 1;
|
||||
|
||||
/* subtraction of 1 prevents skipping of the first row */
|
||||
y_min = q[left][1] - 1;
|
||||
|
||||
/* left edge equation: y = k_left * x + b_left */
|
||||
k_left = (q[left][0] - q[next_left][0]) /
|
||||
(q[left][1] - q[next_left][1]);
|
||||
b_left = (q[left][1] * q[next_left][0] -
|
||||
q[left][0] * q[next_left][1]) /
|
||||
(q[left][1] - q[next_left][1]);
|
||||
|
||||
/* right edge equation: y = k_right * x + b_right */
|
||||
k_right = (q[right][0] - q[next_right][0]) /
|
||||
(q[right][1] - q[next_right][1]);
|
||||
b_right = (q[right][1] * q[next_right][0] -
|
||||
q[right][0] * q[next_right][1]) /
|
||||
(q[right][1] - q[next_right][1]);
|
||||
|
||||
for(;;)
|
||||
{
|
||||
int x, y;
|
||||
|
||||
y_max = MIN( q[next_left][1], q[next_right][1] );
|
||||
|
||||
int iy_min = MAX( cvRound(y_min), 0 ) + 1;
|
||||
int iy_max = MIN( cvRound(y_max), dst_size.height - 1 );
|
||||
|
||||
double x_min = k_left * iy_min + b_left;
|
||||
double x_max = k_right * iy_min + b_right;
|
||||
|
||||
/* walk through the destination quadrangle row by row */
|
||||
for( y = iy_min; y <= iy_max; ++y )
|
||||
{
|
||||
int ix_min = MAX( cvRound( x_min ), 0 );
|
||||
int ix_max = MIN( cvRound( x_max ), dst_size.width - 1 );
|
||||
|
||||
for( x = ix_min; x <= ix_max; ++x )
|
||||
{
|
||||
/* calculate coordinates of the corresponding source array point */
|
||||
double div = (c[2][0] * x + c[2][1] * y + c[2][2]);
|
||||
double src_x = (c[0][0] * x + c[0][1] * y + c[0][2]) / div;
|
||||
double src_y = (c[1][0] * x + c[1][1] * y + c[1][2]) / div;
|
||||
|
||||
int isrc_x = cvFloor( src_x );
|
||||
int isrc_y = cvFloor( src_y );
|
||||
double delta_x = src_x - isrc_x;
|
||||
double delta_y = src_y - isrc_y;
|
||||
|
||||
uchar* s = src_data + isrc_y * src_step + isrc_x;
|
||||
|
||||
int i00, i10, i01, i11;
|
||||
i00 = i10 = i01 = i11 = (int) fill_value;
|
||||
|
||||
/* linear interpolation using 2x2 neighborhood */
|
||||
if( isrc_x >= 0 && isrc_x <= src_size.width &&
|
||||
isrc_y >= 0 && isrc_y <= src_size.height )
|
||||
{
|
||||
i00 = s[0];
|
||||
}
|
||||
if( isrc_x >= -1 && isrc_x < src_size.width &&
|
||||
isrc_y >= 0 && isrc_y <= src_size.height )
|
||||
{
|
||||
i10 = s[1];
|
||||
}
|
||||
if( isrc_x >= 0 && isrc_x <= src_size.width &&
|
||||
isrc_y >= -1 && isrc_y < src_size.height )
|
||||
{
|
||||
i01 = s[src_step];
|
||||
}
|
||||
if( isrc_x >= -1 && isrc_x < src_size.width &&
|
||||
isrc_y >= -1 && isrc_y < src_size.height )
|
||||
{
|
||||
i11 = s[src_step+1];
|
||||
}
|
||||
|
||||
double i0 = i00 + (i10 - i00)*delta_x;
|
||||
double i1 = i01 + (i11 - i01)*delta_x;
|
||||
|
||||
((uchar*)(dst_data + y * dst_step))[x] = (uchar) (i0 + (i1 - i0)*delta_y);
|
||||
}
|
||||
x_min += k_left;
|
||||
x_max += k_right;
|
||||
}
|
||||
|
||||
if( (next_left == next_right) ||
|
||||
(next_left+1 == next_right && q[next_left][1] == q[next_right][1]) )
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
if( y_max == q[next_left][1] )
|
||||
{
|
||||
left = next_left;
|
||||
next_left = left - 1;
|
||||
|
||||
k_left = (q[left][0] - q[next_left][0]) /
|
||||
(q[left][1] - q[next_left][1]);
|
||||
b_left = (q[left][1] * q[next_left][0] -
|
||||
q[left][0] * q[next_left][1]) /
|
||||
(q[left][1] - q[next_left][1]);
|
||||
}
|
||||
if( y_max == q[next_right][1] )
|
||||
{
|
||||
right = next_right;
|
||||
next_right = right + 1;
|
||||
|
||||
k_right = (q[right][0] - q[next_right][0]) /
|
||||
(q[right][1] - q[next_right][1]);
|
||||
b_right = (q[right][1] * q[next_right][0] -
|
||||
q[right][0] * q[next_right][1]) /
|
||||
(q[right][1] - q[next_right][1]);
|
||||
}
|
||||
y_min = y_max;
|
||||
}
|
||||
#endif /* #ifndef __IPL_H__ */
|
||||
|
||||
__END__;
|
||||
}
|
||||
|
||||
static
|
||||
void icvRandomQuad( int width, int height, double quad[4][2],
|
||||
double maxxangle,
|
||||
double maxyangle,
|
||||
double maxzangle )
|
||||
{
|
||||
double distfactor = 3.0;
|
||||
double distfactor2 = 1.0;
|
||||
|
||||
double halfw, halfh;
|
||||
int i;
|
||||
|
||||
double rotVectData[3];
|
||||
double vectData[3];
|
||||
double rotMatData[9];
|
||||
|
||||
CvMat rotVect;
|
||||
CvMat rotMat;
|
||||
CvMat vect;
|
||||
|
||||
double d;
|
||||
|
||||
rotVect = cvMat( 3, 1, CV_64FC1, &rotVectData[0] );
|
||||
rotMat = cvMat( 3, 3, CV_64FC1, &rotMatData[0] );
|
||||
vect = cvMat( 3, 1, CV_64FC1, &vectData[0] );
|
||||
|
||||
rotVectData[0] = maxxangle * (2.0 * rand() / RAND_MAX - 1.0);
|
||||
rotVectData[1] = ( maxyangle - fabs( rotVectData[0] ) )
|
||||
* (2.0 * rand() / RAND_MAX - 1.0);
|
||||
rotVectData[2] = maxzangle * (2.0 * rand() / RAND_MAX - 1.0);
|
||||
d = (distfactor + distfactor2 * (2.0 * rand() / RAND_MAX - 1.0)) * width;
|
||||
|
||||
/*
|
||||
rotVectData[0] = maxxangle;
|
||||
rotVectData[1] = maxyangle;
|
||||
rotVectData[2] = maxzangle;
|
||||
|
||||
d = distfactor * width;
|
||||
*/
|
||||
|
||||
cvRodrigues2( &rotVect, &rotMat );
|
||||
|
||||
halfw = 0.5 * width;
|
||||
halfh = 0.5 * height;
|
||||
|
||||
quad[0][0] = -halfw;
|
||||
quad[0][1] = -halfh;
|
||||
quad[1][0] = halfw;
|
||||
quad[1][1] = -halfh;
|
||||
quad[2][0] = halfw;
|
||||
quad[2][1] = halfh;
|
||||
quad[3][0] = -halfw;
|
||||
quad[3][1] = halfh;
|
||||
|
||||
for( i = 0; i < 4; i++ )
|
||||
{
|
||||
rotVectData[0] = quad[i][0];
|
||||
rotVectData[1] = quad[i][1];
|
||||
rotVectData[2] = 0.0;
|
||||
cvMatMulAdd( &rotMat, &rotVect, 0, &vect );
|
||||
quad[i][0] = vectData[0] * d / (d + vectData[2]) + halfw;
|
||||
quad[i][1] = vectData[1] * d / (d + vectData[2]) + halfh;
|
||||
|
||||
/*
|
||||
quad[i][0] += halfw;
|
||||
quad[i][1] += halfh;
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int icvStartSampleDistortion( const char* imgfilename, int bgcolor, int bgthreshold,
|
||||
CvSampleDistortionData* data )
|
||||
{
|
||||
memset( data, 0, sizeof( *data ) );
|
||||
data->src = cvLoadImage( imgfilename, 0 );
|
||||
if( data->src != NULL && data->src->nChannels == 1
|
||||
&& data->src->depth == IPL_DEPTH_8U )
|
||||
{
|
||||
int r, c;
|
||||
uchar* pmask;
|
||||
uchar* psrc;
|
||||
uchar* perode;
|
||||
uchar* pdilate;
|
||||
uchar dd, de;
|
||||
|
||||
data->dx = data->src->width / 2;
|
||||
data->dy = data->src->height / 2;
|
||||
data->bgcolor = bgcolor;
|
||||
|
||||
data->mask = cvCloneImage( data->src );
|
||||
data->erode = cvCloneImage( data->src );
|
||||
data->dilate = cvCloneImage( data->src );
|
||||
|
||||
/* make mask image */
|
||||
for( r = 0; r < data->mask->height; r++ )
|
||||
{
|
||||
for( c = 0; c < data->mask->width; c++ )
|
||||
{
|
||||
pmask = ( (uchar*) (data->mask->imageData + r * data->mask->widthStep)
|
||||
+ c );
|
||||
if( bgcolor - bgthreshold <= (int) (*pmask) &&
|
||||
(int) (*pmask) <= bgcolor + bgthreshold )
|
||||
{
|
||||
*pmask = (uchar) 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
*pmask = (uchar) 255;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* extend borders of source image */
|
||||
cvErode( data->src, data->erode, 0, 1 );
|
||||
cvDilate( data->src, data->dilate, 0, 1 );
|
||||
for( r = 0; r < data->mask->height; r++ )
|
||||
{
|
||||
for( c = 0; c < data->mask->width; c++ )
|
||||
{
|
||||
pmask = ( (uchar*) (data->mask->imageData + r * data->mask->widthStep)
|
||||
+ c );
|
||||
if( (*pmask) == 0 )
|
||||
{
|
||||
                    psrc = ( (uchar*) (data->src->imageData + r * data->src->widthStep)
                           + c );
                    perode =
                        ( (uchar*) (data->erode->imageData + r * data->erode->widthStep)
                                + c );
                    pdilate =
                        ( (uchar*)(data->dilate->imageData + r * data->dilate->widthStep)
                                + c );
                    de = (uchar)(bgcolor - (*perode));
                    dd = (uchar)((*pdilate) - bgcolor);
                    if( de >= dd && de > bgthreshold )
                    {
                        (*psrc) = (*perode);
                    }
                    if( dd > de && dd > bgthreshold )
                    {
                        (*psrc) = (*pdilate);
                    }
                }
            }
        }

        data->img = cvCreateImage( cvSize( data->src->width + 2 * data->dx,
                                           data->src->height + 2 * data->dy ),
                                   IPL_DEPTH_8U, 1 );
        data->maskimg = cvCloneImage( data->img );

        return 1;
    }

    return 0;
}

void icvPlaceDistortedSample( CvArr* background,
                              int inverse, int maxintensitydev,
                              double maxxangle, double maxyangle, double maxzangle,
                              int inscribe, double maxshiftf, double maxscalef,
                              CvSampleDistortionData* data )
{
    double quad[4][2];
    int r, c;
    uchar* pimg;
    uchar* pbg;
    uchar* palpha;
    uchar chartmp;
    int forecolordev;
    float scale;
    IplImage* img;
    IplImage* maskimg;
    CvMat stub;
    CvMat* bgimg;

    CvRect cr;
    CvRect roi;

    double xshift, yshift, randscale;

    icvRandomQuad( data->src->width, data->src->height, quad,
                   maxxangle, maxyangle, maxzangle );
    quad[0][0] += (double) data->dx;
    quad[0][1] += (double) data->dy;
    quad[1][0] += (double) data->dx;
    quad[1][1] += (double) data->dy;
    quad[2][0] += (double) data->dx;
    quad[2][1] += (double) data->dy;
    quad[3][0] += (double) data->dx;
    quad[3][1] += (double) data->dy;

    cvSet( data->img, cvScalar( data->bgcolor ) );
    cvSet( data->maskimg, cvScalar( 0.0 ) );

    cvWarpPerspective( data->src, data->img, quad );
    cvWarpPerspective( data->mask, data->maskimg, quad );

    cvSmooth( data->maskimg, data->maskimg, CV_GAUSSIAN, 3, 3 );

    bgimg = cvGetMat( background, &stub );

    cr.x = data->dx;
    cr.y = data->dy;
    cr.width = data->src->width;
    cr.height = data->src->height;

    if( inscribe )
    {
        /* quad's circumscribing rectangle */
        cr.x = (int) MIN( quad[0][0], quad[3][0] );
        cr.y = (int) MIN( quad[0][1], quad[1][1] );
        cr.width  = (int) (MAX( quad[1][0], quad[2][0] ) + 0.5F ) - cr.x;
        cr.height = (int) (MAX( quad[2][1], quad[3][1] ) + 0.5F ) - cr.y;
    }

    xshift = maxshiftf * rand() / RAND_MAX;
    yshift = maxshiftf * rand() / RAND_MAX;

    cr.x -= (int) ( xshift * cr.width  );
    cr.y -= (int) ( yshift * cr.height );
    cr.width  = (int) ((1.0 + maxshiftf) * cr.width );
    cr.height = (int) ((1.0 + maxshiftf) * cr.height);

    randscale = maxscalef * rand() / RAND_MAX;
    cr.x -= (int) ( 0.5 * randscale * cr.width  );
    cr.y -= (int) ( 0.5 * randscale * cr.height );
    cr.width  = (int) ((1.0 + randscale) * cr.width );
    cr.height = (int) ((1.0 + randscale) * cr.height);

    scale = MAX( ((float) cr.width) / bgimg->cols, ((float) cr.height) / bgimg->rows );

    roi.x = (int) (-0.5F * (scale * bgimg->cols - cr.width) + cr.x);
    roi.y = (int) (-0.5F * (scale * bgimg->rows - cr.height) + cr.y);
    roi.width  = (int) (scale * bgimg->cols);
    roi.height = (int) (scale * bgimg->rows);

    img = cvCreateImage( cvSize( bgimg->cols, bgimg->rows ), IPL_DEPTH_8U, 1 );
    maskimg = cvCreateImage( cvSize( bgimg->cols, bgimg->rows ), IPL_DEPTH_8U, 1 );

    cvSetImageROI( data->img, roi );
    cvResize( data->img, img );
    cvResetImageROI( data->img );
    cvSetImageROI( data->maskimg, roi );
    cvResize( data->maskimg, maskimg );
    cvResetImageROI( data->maskimg );

    forecolordev = (int) (maxintensitydev * (2.0 * rand() / RAND_MAX - 1.0));

    for( r = 0; r < img->height; r++ )
    {
        for( c = 0; c < img->width; c++ )
        {
            pimg = (uchar*) img->imageData + r * img->widthStep + c;
            pbg = (uchar*) bgimg->data.ptr + r * bgimg->step + c;
            palpha = (uchar*) maskimg->imageData + r * maskimg->widthStep + c;
            chartmp = (uchar) MAX( 0, MIN( 255, forecolordev + (*pimg) ) );
            if( inverse )
            {
                chartmp ^= 0xFF;
            }
            *pbg = (uchar) (( chartmp * (*palpha) + (255 - (*palpha)) * (*pbg) ) / 255);
        }
    }

    cvReleaseImage( &img );
    cvReleaseImage( &maskimg );
}
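The composition loop above is a plain 8-bit alpha blend that uses the Gaussian-smoothed warp mask as the alpha channel: dst = (fg * a + (255 - a) * bg) / 255. A mask value of 255 keeps the intensity-shifted (and optionally inverted) sample pixel, 0 keeps the background pixel, and the intermediate values produced by cvSmooth feather the seam between sample and background.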
void icvEndSampleDistortion( CvSampleDistortionData* data )
{
    if( data->src )
    {
        cvReleaseImage( &data->src );
    }
    if( data->mask )
    {
        cvReleaseImage( &data->mask );
    }
    if( data->erode )
    {
        cvReleaseImage( &data->erode );
    }
    if( data->dilate )
    {
        cvReleaseImage( &data->dilate );
    }
    if( data->img )
    {
        cvReleaseImage( &data->img );
    }
    if( data->maskimg )
    {
        cvReleaseImage( &data->maskimg );
    }
}

void icvWriteVecHeader( FILE* file, int count, int width, int height )
{
    int vecsize;
    short tmp;

    /* number of samples */
    fwrite( &count, sizeof( count ), 1, file );
    /* vector size */
    vecsize = width * height;
    fwrite( &vecsize, sizeof( vecsize ), 1, file );
    /* min/max values */
    tmp = 0;
    fwrite( &tmp, sizeof( tmp ), 1, file );
    fwrite( &tmp, sizeof( tmp ), 1, file );
}

void icvWriteVecSample( FILE* file, CvArr* sample )
{
    CvMat* mat, stub;
    int r, c;
    short tmp;
    uchar chartmp;

    mat = cvGetMat( sample, &stub );
    chartmp = 0;
    fwrite( &chartmp, sizeof( chartmp ), 1, file );
    for( r = 0; r < mat->rows; r++ )
    {
        for( c = 0; c < mat->cols; c++ )
        {
            tmp = (short) (CV_MAT_ELEM( *mat, uchar, r, c ));
            fwrite( &tmp, sizeof( tmp ), 1, file );
        }
    }
}
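Taken together, these two writers define the whole .vec layout: an int32 sample count, an int32 vector size (width * height), two unused int16 min/max fields written as zeros, and then, per sample, a single zero byte followed by vecsize int16 pixel values. A minimal header reader matching this layout could look like the sketch below; read_vec_header is a hypothetical helper, not OpenCV API, and it assumes the host byte order used by the writer and does no error recovery.

    #include <stdio.h>

    static int read_vec_header( FILE* f, int* count, int* vecsize )
    {
        short minmax[2]; /* written as zeros by icvWriteVecHeader */
        if( fread( count,   sizeof( *count ),    1, f ) != 1 ) return 0;
        if( fread( vecsize, sizeof( *vecsize ),  1, f ) != 1 ) return 0;
        if( fread( minmax,  sizeof( minmax[0] ), 2, f ) != 2 ) return 0;
        return 1;
    }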
int cvCreateTrainingSamplesFromInfo( const char* infoname, const char* vecfilename,
                                     int num,
                                     int showsamples,
                                     int winwidth, int winheight )
{
    char fullname[PATH_MAX];
    char* filename;

    FILE* info;
    FILE* vec;
    IplImage* src = 0;
    IplImage* sample;
    int line;
    int error;
    int i;
    int x, y, width, height;
    int total;

    assert( infoname != NULL );
    assert( vecfilename != NULL );

    total = 0;
    if( !icvMkDir( vecfilename ) )
    {
#if CV_VERBOSE
        fprintf( stderr, "Unable to create directory hierarchy: %s\n", vecfilename );
#endif /* CV_VERBOSE */
        return total;
    }

    info = fopen( infoname, "r" );
    if( info == NULL )
    {
#if CV_VERBOSE
        fprintf( stderr, "Unable to open file: %s\n", infoname );
#endif /* CV_VERBOSE */
        return total;
    }

    vec = fopen( vecfilename, "wb" );
    if( vec == NULL )
    {
#if CV_VERBOSE
        fprintf( stderr, "Unable to open file: %s\n", vecfilename );
#endif /* CV_VERBOSE */
        fclose( info );
        return total;
    }

    sample = cvCreateImage( cvSize( winwidth, winheight ), IPL_DEPTH_8U, 1 );

    icvWriteVecHeader( vec, num, sample->width, sample->height );

    if( showsamples )
    {
        cvNamedWindow( "Sample", CV_WINDOW_AUTOSIZE );
    }

    strcpy( fullname, infoname );
    filename = strrchr( fullname, '\\' );
    if( filename == NULL )
    {
        filename = strrchr( fullname, '/' );
    }
    if( filename == NULL )
    {
        filename = fullname;
    }
    else
    {
        filename++;
    }

    for( line = 1, error = 0, total = 0; total < num; line++ )
    {
        int count;

        error = ( fscanf( info, "%s %d", filename, &count ) != 2 );
        if( !error )
        {
            src = cvLoadImage( fullname, 0 );
            error = ( src == NULL );
            if( error )
            {
#if CV_VERBOSE
                fprintf( stderr, "Unable to open image: %s\n", fullname );
#endif /* CV_VERBOSE */
            }
        }
        for( i = 0; (i < count) && (total < num); i++, total++ )
        {
            error = ( fscanf( info, "%d %d %d %d", &x, &y, &width, &height ) != 4 );
            if( error ) break;
            cvSetImageROI( src, cvRect( x, y, width, height ) );
            cvResize( src, sample, width >= sample->width &&
                      height >= sample->height ? CV_INTER_AREA : CV_INTER_LINEAR );

            if( showsamples )
            {
                cvShowImage( "Sample", sample );
                if( cvWaitKey( 0 ) == 27 )
                {
                    showsamples = 0;
                }
            }
            icvWriteVecSample( vec, sample );
        }

        if( src )
        {
            cvReleaseImage( &src );
        }

        if( error )
        {
#if CV_VERBOSE
            fprintf( stderr, "%s(%d) : parse error", infoname, line );
#endif /* CV_VERBOSE */
            break;
        }
    }

    if( sample )
    {
        cvReleaseImage( &sample );
    }

    fclose( vec );
    fclose( info );

    return total;
}
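For reference, the info (collection) file consumed by the fscanf calls above is plain text: each line is an image path (resolved relative to the info file, since the code overwrites only the basename part of fullname) followed by an object count and that many "x y width height" boxes. An illustrative two-line file, with hypothetical file names:

    positives/img_0001.png 2 140 100 45 45 100 200 50 50
    positives/img_0002.png 1 30 25 24 24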
void cvShowVecSamples( const char* filename, int winwidth, int winheight,
                       double scale )
{
    CvVecFile file;
    short tmp;
    int i;
    CvMat* sample;

    tmp = 0;
    file.input = fopen( filename, "rb" );

    if( file.input != NULL )
    {
        size_t elements_read1 = fread( &file.count, sizeof( file.count ), 1, file.input );
        size_t elements_read2 = fread( &file.vecsize, sizeof( file.vecsize ), 1, file.input );
        size_t elements_read3 = fread( &tmp, sizeof( tmp ), 1, file.input );
        size_t elements_read4 = fread( &tmp, sizeof( tmp ), 1, file.input );
        CV_Assert( elements_read1 == 1 && elements_read2 == 1 &&
                   elements_read3 == 1 && elements_read4 == 1 );

        if( file.vecsize != winwidth * winheight )
        {
            int guessed_w = 0;
            int guessed_h = 0;

            fprintf( stderr, "Warning: specified sample width=%d and height=%d "
                     "does not correspond to .vec file vector size=%d.\n",
                     winwidth, winheight, file.vecsize );
            if( file.vecsize > 0 )
            {
                guessed_w = cvFloor( sqrt( (float) file.vecsize ) );
                if( guessed_w > 0 )
                {
                    guessed_h = file.vecsize / guessed_w;
                }
            }

            if( guessed_w <= 0 || guessed_h <= 0 || guessed_w * guessed_h != file.vecsize )
            {
                fprintf( stderr, "Error: failed to guess sample width and height\n" );
                fclose( file.input );
                return;
            }
            else
            {
                winwidth = guessed_w;
                winheight = guessed_h;
                fprintf( stderr, "Guessed width=%d, guessed height=%d\n",
                         winwidth, winheight );
            }
        }

        if( !feof( file.input ) && scale > 0 )
        {
            CvMat* scaled_sample = 0;

            file.last = 0;
            file.vector = (short*) cvAlloc( sizeof( *file.vector ) * file.vecsize );
            sample = scaled_sample = cvCreateMat( winheight, winwidth, CV_8UC1 );
            if( scale != 1.0 )
            {
                scaled_sample = cvCreateMat( MAX( 1, cvCeil( scale * winheight ) ),
                                             MAX( 1, cvCeil( scale * winwidth ) ),
                                             CV_8UC1 );
            }
            cvNamedWindow( "Sample", CV_WINDOW_AUTOSIZE );
            for( i = 0; i < file.count; i++ )
            {
                icvGetHaarTraininDataFromVecCallback( sample, &file );
                if( scale != 1.0 ) cvResize( sample, scaled_sample, CV_INTER_LINEAR );
                cvShowImage( "Sample", scaled_sample );
                if( cvWaitKey( 0 ) == 27 ) break;
            }
            if( scaled_sample && scaled_sample != sample ) cvReleaseMat( &scaled_sample );
            cvReleaseMat( &sample );
            cvFree( &file.vector );
        }
        fclose( file.input );
    }
}

/* End of file. */
@ -1,284 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/*
 * haartraining.cpp
 *
 * Train cascade classifier
 */

#include <cstdio>
#include <cstring>
#include <cstdlib>

using namespace std;

#include "cvhaartraining.h"

int main( int argc, char* argv[] )
{
    int i = 0;
    char* nullname = (char*)"(NULL)";

    char* vecname = NULL;
    char* dirname = NULL;
    char* bgname = NULL;

    bool bg_vecfile = false;
    int npos = 2000;
    int nneg = 2000;
    int nstages = 14;
    int mem = 200;
    int nsplits = 1;
    float minhitrate = 0.995F;
    float maxfalsealarm = 0.5F;
    float weightfraction = 0.95F;
    int mode = 0;
    int symmetric = 1;
    int equalweights = 0;
    int width = 24;
    int height = 24;
    const char* boosttypes[] = { "DAB", "RAB", "LB", "GAB" };
    int boosttype = 3;
    const char* stumperrors[] = { "misclass", "gini", "entropy" };
    int stumperror = 0;
    int maxtreesplits = 0;
    int minpos = 500;

    if( argc == 1 )
    {
        printf( "Usage: %s\n  -data <dir_name>\n"
                "  -vec <vec_file_name>\n"
                "  -bg <background_file_name>\n"
                "  [-bg-vecfile]\n"
                "  [-npos <number_of_positive_samples = %d>]\n"
                "  [-nneg <number_of_negative_samples = %d>]\n"
                "  [-nstages <number_of_stages = %d>]\n"
                "  [-nsplits <number_of_splits = %d>]\n"
                "  [-mem <memory_in_MB = %d>]\n"
                "  [-sym (default)] [-nonsym]\n"
                "  [-minhitrate <min_hit_rate = %f>]\n"
                "  [-maxfalsealarm <max_false_alarm_rate = %f>]\n"
                "  [-weighttrimming <weight_trimming = %f>]\n"
                "  [-eqw]\n"
                "  [-mode <BASIC (default) | CORE | ALL>]\n"
                "  [-w <sample_width = %d>]\n"
                "  [-h <sample_height = %d>]\n"
                "  [-bt <DAB | RAB | LB | GAB (default)>]\n"
                "  [-err <misclass (default) | gini | entropy>]\n"
                "  [-maxtreesplits <max_number_of_splits_in_tree_cascade = %d>]\n"
                "  [-minpos <min_number_of_positive_samples_per_cluster = %d>]\n",
                argv[0], npos, nneg, nstages, nsplits, mem,
                minhitrate, maxfalsealarm, weightfraction, width, height,
                maxtreesplits, minpos );

        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        if( !strcmp( argv[i], "-data" ) )
        {
            dirname = argv[++i];
        }
        else if( !strcmp( argv[i], "-vec" ) )
        {
            vecname = argv[++i];
        }
        else if( !strcmp( argv[i], "-bg" ) )
        {
            bgname = argv[++i];
        }
        else if( !strcmp( argv[i], "-bg-vecfile" ) )
        {
            bg_vecfile = true;
        }
        else if( !strcmp( argv[i], "-npos" ) )
        {
            npos = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-nneg" ) )
        {
            nneg = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-nstages" ) )
        {
            nstages = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-nsplits" ) )
        {
            nsplits = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-mem" ) )
        {
            mem = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-sym" ) )
        {
            symmetric = 1;
        }
        else if( !strcmp( argv[i], "-nonsym" ) )
        {
            symmetric = 0;
        }
        else if( !strcmp( argv[i], "-minhitrate" ) )
        {
            minhitrate = (float) atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-maxfalsealarm" ) )
        {
            maxfalsealarm = (float) atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-weighttrimming" ) )
        {
            weightfraction = (float) atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-eqw" ) )
        {
            equalweights = 1;
        }
        else if( !strcmp( argv[i], "-mode" ) )
        {
            char* tmp = argv[++i];

            if( !strcmp( tmp, "CORE" ) )
            {
                mode = 1;
            }
            else if( !strcmp( tmp, "ALL" ) )
            {
                mode = 2;
            }
            else
            {
                mode = 0;
            }
        }
        else if( !strcmp( argv[i], "-w" ) )
        {
            width = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-h" ) )
        {
            height = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-bt" ) )
        {
            i++;
            if( !strcmp( argv[i], boosttypes[0] ) )
            {
                boosttype = 0;
            }
            else if( !strcmp( argv[i], boosttypes[1] ) )
            {
                boosttype = 1;
            }
            else if( !strcmp( argv[i], boosttypes[2] ) )
            {
                boosttype = 2;
            }
            else
            {
                boosttype = 3;
            }
        }
        else if( !strcmp( argv[i], "-err" ) )
        {
            i++;
            if( !strcmp( argv[i], stumperrors[0] ) )
            {
                stumperror = 0;
            }
            else if( !strcmp( argv[i], stumperrors[1] ) )
            {
                stumperror = 1;
            }
            else
            {
                stumperror = 2;
            }
        }
        else if( !strcmp( argv[i], "-maxtreesplits" ) )
        {
            maxtreesplits = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-minpos" ) )
        {
            minpos = atoi( argv[++i] );
        }
    }

    printf( "Data dir name: %s\n", ((dirname == NULL) ? nullname : dirname ) );
    printf( "Vec file name: %s\n", ((vecname == NULL) ? nullname : vecname ) );
    printf( "BG file name: %s, is a vecfile: %s\n", ((bgname == NULL) ? nullname : bgname ), bg_vecfile ? "yes" : "no" );
    printf( "Num pos: %d\n", npos );
    printf( "Num neg: %d\n", nneg );
    printf( "Num stages: %d\n", nstages );
    printf( "Num splits: %d (%s as weak classifier)\n", nsplits,
            (nsplits == 1) ? "stump" : "tree" );
    printf( "Mem: %d MB\n", mem );
    printf( "Symmetric: %s\n", (symmetric) ? "TRUE" : "FALSE" );
    printf( "Min hit rate: %f\n", minhitrate );
    printf( "Max false alarm rate: %f\n", maxfalsealarm );
    printf( "Weight trimming: %f\n", weightfraction );
    printf( "Equal weights: %s\n", (equalweights) ? "TRUE" : "FALSE" );
    printf( "Mode: %s\n", ( (mode == 0) ? "BASIC" : ( (mode == 1) ? "CORE" : "ALL") ) );
    printf( "Width: %d\n", width );
    printf( "Height: %d\n", height );
    //printf( "Max num of precalculated features: %d\n", numprecalculated );
    printf( "Applied boosting algorithm: %s\n", boosttypes[boosttype] );
    printf( "Error (valid only for Discrete and Real AdaBoost): %s\n",
            stumperrors[stumperror] );

    printf( "Max number of splits in tree cascade: %d\n", maxtreesplits );
    printf( "Min number of positive samples per cluster: %d\n", minpos );

    cvCreateTreeCascadeClassifier( dirname, vecname, bgname,
                                   npos, nneg, nstages, mem,
                                   nsplits,
                                   minhitrate, maxfalsealarm, weightfraction,
                                   mode, symmetric,
                                   equalweights, width, height,
                                   boosttype, stumperror,
                                   maxtreesplits, minpos, bg_vecfile );

    return 0;
}
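Since this PR deletes opencv_haartraining, the maintained trainer is the opencv_traincascade app, whose CMake script is adjusted at the end of this patch; it consumes the same .vec files produced by opencv_createsamples. An invocation roughly matching the defaults above might look like this, with illustrative paths:

    opencv_traincascade -data cascade_dir -vec samples.vec -bg bg.txt \
        -numPos 2000 -numNeg 2000 -numStages 14 -w 24 -h 24 -featureType HAAR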
@ -1,377 +0,0 @@
/*M/// (same Intel license header as above) ///M*/

/*
 * performance.cpp
 *
 * Measure performance of classifier
 */
#include "opencv2/core.hpp"

#include "cv.h"
#include "highgui.h"

#include <cstdio>
#include <cmath>
#include <ctime>

#ifdef _WIN32
/* use clock() function instead of time() */
#define time( arg ) (((double) clock()) / CLOCKS_PER_SEC)
#endif /* _WIN32 */

#ifndef PATH_MAX
#define PATH_MAX 512
#endif /* PATH_MAX */

typedef struct HidCascade
{
    int size;
    int count;
} HidCascade;

typedef struct ObjectPos
{
    float x;
    float y;
    float width;
    int found;    /* for reference */
    int neghbors;
} ObjectPos;

int main( int argc, char* argv[] )
{
    int i, j;
    char* classifierdir = NULL;
    //char* samplesdir = NULL;

    int saveDetected = 1;
    double scale_factor = 1.2;
    float maxSizeDiff = 1.5F;
    float maxPosDiff  = 0.3F;

    /* number of stages. if <= 0 all stages are used */
    int nos = -1, nos0;

    int width  = 24;
    int height = 24;

    int rocsize;

    FILE* info;
    char* infoname;
    char fullname[PATH_MAX];
    char detfilename[PATH_MAX];
    char* filename;
    char detname[] = "det-";

    CvHaarClassifierCascade* cascade;
    CvMemStorage* storage;
    CvSeq* objects;

    double totaltime;

    infoname = (char*)"";
    rocsize = 40;
    if( argc == 1 )
    {
        printf( "Usage: %s\n  -data <classifier_directory_name>\n"
                "  -info <collection_file_name>\n"
                "  [-maxSizeDiff <max_size_difference = %f>]\n"
                "  [-maxPosDiff <max_position_difference = %f>]\n"
                "  [-sf <scale_factor = %f>]\n"
                "  [-ni]\n"
                "  [-nos <number_of_stages = %d>]\n"
                "  [-rs <roc_size = %d>]\n"
                "  [-w <sample_width = %d>]\n"
                "  [-h <sample_height = %d>]\n",
                argv[0], maxSizeDiff, maxPosDiff, scale_factor, nos, rocsize,
                width, height );

        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        if( !strcmp( argv[i], "-data" ) )
        {
            classifierdir = argv[++i];
        }
        else if( !strcmp( argv[i], "-info" ) )
        {
            infoname = argv[++i];
        }
        else if( !strcmp( argv[i], "-maxSizeDiff" ) )
        {
            maxSizeDiff = (float) atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-maxPosDiff" ) )
        {
            maxPosDiff = (float) atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-sf" ) )
        {
            scale_factor = atof( argv[++i] );
        }
        else if( !strcmp( argv[i], "-ni" ) )
        {
            saveDetected = 0;
        }
        else if( !strcmp( argv[i], "-nos" ) )
        {
            nos = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-rs" ) )
        {
            rocsize = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-w" ) )
        {
            width = atoi( argv[++i] );
        }
        else if( !strcmp( argv[i], "-h" ) )
        {
            height = atoi( argv[++i] );
        }
    }

    cascade = cvLoadHaarClassifierCascade( classifierdir, cvSize( width, height ) );
    if( cascade == NULL )
    {
        printf( "Unable to load classifier from %s\n", classifierdir );

        return 1;
    }

    int* numclassifiers = new int[cascade->count];
    numclassifiers[0] = cascade->stage_classifier[0].count;
    for( i = 1; i < cascade->count; i++ )
    {
        numclassifiers[i] = numclassifiers[i-1] + cascade->stage_classifier[i].count;
    }

    storage = cvCreateMemStorage();

    nos0 = cascade->count;
    if( nos <= 0 )
        nos = nos0;

    strcpy( fullname, infoname );
    filename = strrchr( fullname, '\\' );
    if( filename == NULL )
    {
        filename = strrchr( fullname, '/' );
    }
    if( filename == NULL )
    {
        filename = fullname;
    }
    else
    {
        filename++;
    }

    info = fopen( infoname, "r" );
    totaltime = 0.0;
    if( info != NULL )
    {
        int x, y;
        IplImage* img;
        int hits, missed, falseAlarms;
        int totalHits, totalMissed, totalFalseAlarms;
        int found;
        float distance;

        int refcount;
        ObjectPos* ref;
        int detcount;
        ObjectPos* det;
        int error = 0;

        int* pos;
        int* neg;

        pos = (int*) cvAlloc( rocsize * sizeof( *pos ) );
        neg = (int*) cvAlloc( rocsize * sizeof( *neg ) );
        for( i = 0; i < rocsize; i++ ) { pos[i] = neg[i] = 0; }

        printf( "+================================+======+======+======+\n" );
        printf( "|            File Name           | Hits |Missed| False|\n" );
        printf( "+================================+======+======+======+\n" );

        totalHits = totalMissed = totalFalseAlarms = 0;
        while( !feof( info ) )
        {
            if( fscanf( info, "%s %d", filename, &refcount ) != 2 || refcount <= 0 ) break;

            img = cvLoadImage( fullname );
            if( !img ) continue;

            ref = (ObjectPos*) cvAlloc( refcount * sizeof( *ref ) );
            for( i = 0; i < refcount; i++ )
            {
                int w, h;
                error = (fscanf( info, "%d %d %d %d", &x, &y, &w, &h ) != 4);
                if( error ) break;
                ref[i].x = 0.5F * w + x;
                ref[i].y = 0.5F * h + y;
                ref[i].width = sqrtf( 0.5F * (w * w + h * h) );
                ref[i].found = 0;
                ref[i].neghbors = 0;
            }
            if( !error )
            {
                cvClearMemStorage( storage );

                cascade->count = nos;
                totaltime -= time( 0 );
                objects = cvHaarDetectObjects( img, cascade, storage, scale_factor, 1 );
                totaltime += time( 0 );
                cascade->count = nos0;

                detcount = ( objects ? objects->total : 0);
                det = (detcount > 0) ?
                    ( (ObjectPos*)cvAlloc( detcount * sizeof( *det )) ) : NULL;
                hits = missed = falseAlarms = 0;
                for( i = 0; i < detcount; i++ )
                {
                    CvAvgComp r = *((CvAvgComp*) cvGetSeqElem( objects, i ));
                    det[i].x = 0.5F * r.rect.width  + r.rect.x;
                    det[i].y = 0.5F * r.rect.height + r.rect.y;
                    det[i].width = sqrtf( 0.5F * (r.rect.width * r.rect.width +
                                                  r.rect.height * r.rect.height) );
                    det[i].neghbors = r.neighbors;

                    if( saveDetected )
                    {
                        cvRectangle( img, cvPoint( r.rect.x, r.rect.y ),
                                     cvPoint( r.rect.x + r.rect.width, r.rect.y + r.rect.height ),
                                     CV_RGB( 255, 0, 0 ), 3 );
                    }

                    found = 0;
                    for( j = 0; j < refcount; j++ )
                    {
                        distance = sqrtf( (det[i].x - ref[j].x) * (det[i].x - ref[j].x) +
                                          (det[i].y - ref[j].y) * (det[i].y - ref[j].y) );
                        if( (distance < ref[j].width * maxPosDiff) &&
                            (det[i].width > ref[j].width / maxSizeDiff) &&
                            (det[i].width < ref[j].width * maxSizeDiff) )
                        {
                            ref[j].found = 1;
                            ref[j].neghbors = MAX( ref[j].neghbors, det[i].neghbors );
                            found = 1;
                        }
                    }
                    if( !found )
                    {
                        falseAlarms++;
                        neg[MIN(det[i].neghbors, rocsize - 1)]++;
                    }
                }
                for( j = 0; j < refcount; j++ )
                {
                    if( ref[j].found )
                    {
                        hits++;
                        pos[MIN(ref[j].neghbors, rocsize - 1)]++;
                    }
                    else
                    {
                        missed++;
                    }
                }

                totalHits += hits;
                totalMissed += missed;
                totalFalseAlarms += falseAlarms;
                printf( "|%32.32s|%6d|%6d|%6d|\n", filename, hits, missed, falseAlarms );
                printf( "+--------------------------------+------+------+------+\n" );
                fflush( stdout );

                if( saveDetected )
                {
                    strcpy( detfilename, detname );
                    strcat( detfilename, filename );
                    strcpy( filename, detfilename );
                    cvvSaveImage( fullname, img );
                }

                if( det ) { cvFree( &det ); det = NULL; }
            } /* if( !error ) */

            cvReleaseImage( &img );
            cvFree( &ref );
        }
        fclose( info );

        printf( "|%32.32s|%6d|%6d|%6d|\n", "Total",
                totalHits, totalMissed, totalFalseAlarms );
        printf( "+================================+======+======+======+\n" );
        printf( "Number of stages: %d\n", nos );
        printf( "Number of weak classifiers: %d\n", numclassifiers[nos - 1] );
        printf( "Total time: %f\n", totaltime );

        /* print ROC to stdout */
        for( i = rocsize - 1; i > 0; i-- )
        {
            pos[i-1] += pos[i];
            neg[i-1] += neg[i];
        }
        fprintf( stderr, "%d\n", nos );
        for( i = 0; i < rocsize; i++ )
        {
            fprintf( stderr, "\t%d\t%d\t%f\t%f\n", pos[i], neg[i],
                     ((float)pos[i]) / (totalHits + totalMissed),
                     ((float)neg[i]) / (totalHits + totalMissed) );
        }

        cvFree( &pos );
        cvFree( &neg );
    }

    delete[] numclassifiers;

    cvReleaseHaarClassifierCascade( &cascade );
    cvReleaseMemStorage( &storage );

    return 0;
}
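A note on the ROC dump above: pos[] and neg[] are histograms of matched and unmatched detections keyed by neighbor count, and the backwards loop turns them into cumulative sums, so afterwards pos[i] and neg[i] are the hits and false alarms obtained if only detections with at least i neighbors are accepted. For example, per-neighbor hit counts of {5, 3, 2} for i = 0..2 become {10, 5, 2}, and each row printed to stderr is then one operating point of the curve as the min-neighbors threshold is swept.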
@ -1,33 +0,0 @@
set(name sft)
set(the_target opencv_${name})

set(OPENCV_${the_target}_DEPS opencv_core opencv_softcascade opencv_highgui opencv_imgproc opencv_ml)
ocv_check_dependencies(${OPENCV_${the_target}_DEPS})

if(NOT OCV_DEPENDENCIES_FOUND)
  return()
endif()

project(${the_target})

ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" "${OpenCV_SOURCE_DIR}/include/opencv")
ocv_include_modules(${OPENCV_${the_target}_DEPS})

file(GLOB ${the_target}_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)

add_executable(${the_target} ${${the_target}_SOURCES})

target_link_libraries(${the_target} ${OPENCV_${the_target}_DEPS})

set_target_properties(${the_target} PROPERTIES
                      DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
                      ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
                      RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
                      INSTALL_NAME_DIR lib
                      OUTPUT_NAME "opencv_trainsoftcascade")

if(ENABLE_SOLUTION_FOLDERS)
  set_target_properties(${the_target} PROPERTIES FOLDER "applications")
endif()

install(TARGETS ${the_target} RUNTIME DESTINATION bin COMPONENT main)
@ -1,162 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include <sft/config.hpp>
#include <iomanip>

sft::Config::Config(): seed(0) {}

void sft::Config::write(cv::FileStorage& fs) const
{
    fs << "{"
       << "trainPath" << trainPath
       << "testPath" << testPath

       << "modelWinSize" << modelWinSize
       << "offset" << offset
       << "octaves" << octaves

       << "positives" << positives
       << "negatives" << negatives
       << "btpNegatives" << btpNegatives

       << "shrinkage" << shrinkage

       << "treeDepth" << treeDepth
       << "weaks" << weaks
       << "poolSize" << poolSize

       << "cascadeName" << cascadeName
       << "outXmlPath" << outXmlPath

       << "seed" << seed
       << "featureType" << featureType
       << "}";
}

void sft::Config::read(const cv::FileNode& node)
{
    trainPath = (string)node["trainPath"];
    testPath = (string)node["testPath"];

    cv::FileNodeIterator nIt = node["modelWinSize"].end();
    modelWinSize = cv::Size((int)*(--nIt), (int)*(--nIt));

    nIt = node["offset"].end();
    offset = cv::Point2i((int)*(--nIt), (int)*(--nIt));

    node["octaves"] >> octaves;

    positives = (int)node["positives"];
    negatives = (int)node["negatives"];
    btpNegatives = (int)node["btpNegatives"];

    shrinkage = (int)node["shrinkage"];

    treeDepth = (int)node["treeDepth"];
    weaks = (int)node["weaks"];
    poolSize = (int)node["poolSize"];

    cascadeName = (std::string)node["cascadeName"];
    outXmlPath = (std::string)node["outXmlPath"];

    seed = (int)node["seed"];
    featureType = (std::string)node["featureType"];
}

void sft::write(cv::FileStorage& fs, const string&, const Config& x)
{
    x.write(fs);
}

void sft::read(const cv::FileNode& node, Config& x, const Config& default_value)
{
    x = default_value;

    if(!node.empty())
        x.read(node);
}

namespace {

struct Out
{
    Out(std::ostream& _out): out(_out) {}
    template<typename T>
    void operator ()(const T a) const {out << a << " ";}

    std::ostream& out;
private:
    Out& operator=(Out const& other);
};
}

std::ostream& sft::operator<<(std::ostream& out, const Config& m)
{
    out << std::setw(14) << std::left << "trainPath" << m.trainPath << std::endl
        << std::setw(14) << std::left << "testPath" << m.testPath << std::endl

        << std::setw(14) << std::left << "modelWinSize" << m.modelWinSize << std::endl
        << std::setw(14) << std::left << "offset" << m.offset << std::endl
        << std::setw(14) << std::left << "octaves";

    Out o(out);
    for_each(m.octaves.begin(), m.octaves.end(), o);

    out << std::endl
        << std::setw(14) << std::left << "positives" << m.positives << std::endl
        << std::setw(14) << std::left << "negatives" << m.negatives << std::endl
        << std::setw(14) << std::left << "btpNegatives" << m.btpNegatives << std::endl

        << std::setw(14) << std::left << "shrinkage" << m.shrinkage << std::endl

        << std::setw(14) << std::left << "treeDepth" << m.treeDepth << std::endl
        << std::setw(14) << std::left << "weaks" << m.weaks << std::endl
        << std::setw(14) << std::left << "poolSize" << m.poolSize << std::endl

        << std::setw(14) << std::left << "cascadeName" << m.cascadeName << std::endl
        << std::setw(14) << std::left << "outXmlPath" << m.outXmlPath << std::endl
        << std::setw(14) << std::left << "seed" << m.seed << std::endl
        << std::setw(14) << std::left << "featureType" << m.featureType << std::endl;

    return out;
}
@ -1,77 +0,0 @@
/*M/// (same license header as above) ///M*/

#include <sft/dataset.hpp>
#include <opencv2/highgui.hpp>

#include <iostream>
#include <queue>

// In the default case, data folders should be laid out as follows
// (matching the glob patterns below):
// 1. positives: <train or test path>/pos/octave_<octave number>/*.png
// 2. negatives: <train or test path>/neg/octave_<octave number>/*.png
sft::ScaledDataset::ScaledDataset(const string& path, const int oct)
{
    dprintf("%s\n", "get dataset file names...");
    dprintf("%s\n", "Positives globbing...");
    cv::glob(path + "/pos/octave_" + cv::format("%d", oct) + "/*.png", pos);

    dprintf("%s\n", "Negatives globbing...");
    cv::glob(path + "/neg/octave_" + cv::format("%d", oct) + "/*.png", neg);

    // Check: file lists are not empty
    CV_Assert(pos.size() != size_t(0));
    CV_Assert(neg.size() != size_t(0));
}

cv::Mat sft::ScaledDataset::get(SampleType type, int idx) const
{
    const std::string& src = (type == POSITIVE)? pos[idx]: neg[idx];
    return cv::imread(src);
}

int sft::ScaledDataset::available(SampleType type) const
{
    return (int)((type == POSITIVE)? pos.size(): neg.size());
}

sft::ScaledDataset::~ScaledDataset(){}
@ -1,74 +0,0 @@
/*M/// (same license header as above) ///M*/

#ifndef __SFT_COMMON_HPP__
#define __SFT_COMMON_HPP__

#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/softcascade.hpp>

namespace cv {using namespace softcascade;}
namespace sft
{

    using cv::Mat;
    struct ICF;

    typedef cv::String string;

    typedef std::vector<ICF> Icfvector;
    typedef std::vector<sft::string> svector;
    typedef std::vector<int> ivector;
}

// used for noisy printfs
//#define WITH_DEBUG_OUT

#if defined WITH_DEBUG_OUT
# include <stdio.h>
# define dprintf(format, ...) printf(format, ##__VA_ARGS__)
#else
# define dprintf(format, ...)
#endif

#endif
@ -1,138 +0,0 @@
/*M/// (same license header as above) ///M*/

#ifndef __SFT_CONFIG_HPP__
#define __SFT_CONFIG_HPP__

#include <sft/common.hpp>

#include <ostream>

namespace sft {

struct Config
{
    Config();

    void write(cv::FileStorage& fs) const;

    void read(const cv::FileNode& node);

    // Scaled and shrunk model size.
    cv::Size model(ivector::const_iterator it) const
    {
        float octave = powf(2.f, (float)(*it));
        return cv::Size( cvRound(modelWinSize.width * octave) / shrinkage,
                         cvRound(modelWinSize.height * octave) / shrinkage );
    }

    // Scaled, but not shrunk, bounding box for the object in a sample image.
    cv::Rect bbox(ivector::const_iterator it) const
    {
        float octave = powf(2.f, (float)(*it));
        return cv::Rect( cvRound(offset.x * octave), cvRound(offset.y * octave),
                         cvRound(modelWinSize.width * octave), cvRound(modelWinSize.height * octave));
    }

    string resPath(ivector::const_iterator it) const
    {
        return cv::format("%s%d.xml", cascadeName.c_str(), *it);
    }

    // Paths to the rescaled data.
    string trainPath;
    string testPath;

    // Original model size.
    cv::Size modelWinSize;

    // Example offset into the positive image.
    cv::Point2i offset;

    // List of octaves for which cascades have to be trained (a list of powers of two).
    ivector octaves;

    // Maximum number of positives that should be used during training.
    int positives;

    // Initial number of negatives used during training.
    int negatives;

    // Number of weak negatives to add at each bootstrapping step.
    int btpNegatives;

    // Inverse of the scale used for feature resizing.
    int shrinkage;

    // Depth of each weak classifier's decision tree.
    int treeDepth;

    // Number of weak classifiers in the resulting cascade.
    int weaks;

    // Size of the random feature pool.
    int poolSize;

    // File name under which to store the cascade.
    string cascadeName;

    // Path to the resulting cascade.
    string outXmlPath;

    // Seed for random generation.
    int seed;

    // Channel feature type.
    string featureType;

    // // bounding rectangle for actual example into example window
    // cv::Rect exampleWindow;
};

// required for cv::FileStorage serialization
void write(cv::FileStorage& fs, const string&, const Config& x);
void read(const cv::FileNode& node, Config& x, const Config& default_value);
std::ostream& operator<<(std::ostream& out, const Config& m);

}

#endif
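The two inline helpers above encode the octave geometry: an octave entry o scales the base window by 2^o, and model() additionally divides by the shrinkage factor applied to the feature channels. Assuming an illustrative 64x128 modelWinSize with shrinkage 4, octave 0 yields a 16x32 shrunk model and octave 1 a 32x64 one, while bbox() returns the corresponding unshrunk 64x128 and 128x256 object rectangles shifted by the scaled example offset.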
@ -1,67 +0,0 @@
/*M/// (same license header as above) ///M*/

#ifndef __SFT_OCTAVE_HPP__
#define __SFT_OCTAVE_HPP__

#include <sft/common.hpp>
namespace sft
{

using cv::softcascade::Dataset;

class ScaledDataset : public Dataset
{
public:
    ScaledDataset(const sft::string& path, const int octave);

    virtual cv::Mat get(SampleType type, int idx) const;
    virtual int available(SampleType type) const;
    virtual ~ScaledDataset();

private:
    svector pos;
    svector neg;
};
}

#endif
apps/sft/sft.cpp
@ -1,168 +0,0 @@
/*M/// (same license header as above) ///M*/

// Training application for Soft Cascades.

#include <sft/common.hpp>
#include <iostream>
#include <sft/dataset.hpp>
#include <sft/config.hpp>

#include <opencv2/core/core_c.h>

int main(int argc, char** argv)
{
    using namespace sft;

    const string keys =
        "{help h usage ? |      | print this message }"
        "{config c       |      | path to configuration xml }"
    ;

    cv::CommandLineParser parser(argc, argv, keys);
    parser.about("Soft cascade training application.");

    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    if (!parser.check())
    {
        parser.printErrors();
        return 1;
    }

    string configPath = parser.get<string>("config");
    if (configPath.empty())
    {
        std::cout << "Configuration file is missing or empty. Could not start training." << std::endl;
        return 0;
    }

    std::cout << "Read configuration from file " << configPath << std::endl;
    cv::FileStorage fs(configPath, cv::FileStorage::READ);
    if(!fs.isOpened())
    {
        std::cout << "Configuration file " << configPath << " can't be opened." << std::endl;
        return 1;
    }

    // 1. load config
    sft::Config cfg;
    fs["config"] >> cfg;
    std::cout << std::endl << "Training will be executed for configuration:" << std::endl << cfg << std::endl;

    // 2. check and open output file
    cv::FileStorage fso(cfg.outXmlPath, cv::FileStorage::WRITE);
    if(!fso.isOpened())
    {
        std::cout << "Training stopped. Output classifier Xml file " << cfg.outXmlPath << " can't be opened." << std::endl;
        return 1;
    }

    fso << cfg.cascadeName
        << "{"
        << "stageType"   << "BOOST"
        << "featureType" << cfg.featureType
        << "octavesNum"  << (int)cfg.octaves.size()
        << "width"       << cfg.modelWinSize.width
        << "height"      << cfg.modelWinSize.height
        << "shrinkage"   << cfg.shrinkage
        << "octaves"     << "[";

    // 3. Train all octaves
    for (ivector::const_iterator it = cfg.octaves.begin(); it != cfg.octaves.end(); ++it)
    {
        // a. create random feature pool
        int nfeatures = cfg.poolSize;
        cv::Size model = cfg.model(it);
        std::cout << "Model " << model << std::endl;

        int nchannels = (cfg.featureType == "HOG6MagLuv") ? 10 : 8;

        std::cout << "number of feature channels is " << nchannels << std::endl;

        cv::Ptr<cv::FeaturePool> pool = cv::FeaturePool::create(model, nfeatures, nchannels);
        nfeatures = pool->size();

        int npositives = cfg.positives;
        int nnegatives = cfg.negatives;
        int shrinkage = cfg.shrinkage;
        cv::Rect boundingBox = cfg.bbox(it);
        std::cout << "Object bounding box" << boundingBox << std::endl;

        typedef cv::Octave Octave;

        cv::Ptr<cv::ChannelFeatureBuilder> builder = cv::ChannelFeatureBuilder::create(cfg.featureType);
        std::cout << "Channel builder " << builder->info()->name() << std::endl;
        cv::Ptr<Octave> boost = Octave::create(boundingBox, npositives, nnegatives, *it, shrinkage, builder);

        std::string path = cfg.trainPath;
        sft::ScaledDataset dataset(path, *it);

        if (boost->train(&dataset, pool, cfg.weaks, cfg.treeDepth))
        {
            CvFileStorage* fout = cvOpenFileStorage(cfg.resPath(it).c_str(), 0, CV_STORAGE_WRITE);
            boost->write(fout, cfg.cascadeName);

            cvReleaseFileStorage( &fout );

            cv::Mat thresholds;
            boost->setRejectThresholds(thresholds);

            boost->write(fso, pool, thresholds);

            cv::FileStorage tfs(("thresholds." + cfg.resPath(it)).c_str(), cv::FileStorage::WRITE);
            tfs << "thresholds" << thresholds;

            std::cout << "Octave " << *it << " was successfully trained..." << std::endl;
        }
    }

    fso << "]" << "}";
    fso.release();
    std::cout << "Training complete..." << std::endl;
    return 0;
}
|
@ -1,4 +1,4 @@
set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_photo opencv_objdetect opencv_highgui opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy)
set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_photo opencv_objdetect opencv_highgui opencv_calib3d opencv_video opencv_features2d)
ocv_check_dependencies(${OPENCV_TRAINCASCADE_DEPS})

if(NOT OCV_DEPENDENCIES_FOUND)
@ -20,7 +20,7 @@ set(traincascade_files traincascade.cpp

set(the_target opencv_traincascade)
add_executable(${the_target} ${traincascade_files})
target_link_libraries(${the_target} ${OPENCV_TRAINCASCADE_DEPS} opencv_haartraining_engine)
target_link_libraries(${the_target} ${OPENCV_TRAINCASCADE_DEPS})

set_target_properties(${the_target} PROPERTIES
    DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
@ -65,8 +65,6 @@
#include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking_c.h"
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp"

#if !defined(CV_IMPL)
#define CV_IMPL extern "C"

@ -51,7 +51,6 @@
#include "opencv2/objdetect.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/ml.hpp"

#endif
@ -1 +0,0 @@
ocv_define_module(contrib opencv_imgproc opencv_calib3d opencv_ml opencv_video opencv_objdetect OPTIONAL opencv_highgui opencv_nonfree)
@ -1,12 +0,0 @@
***************************************
contrib. Contributed/Experimental Stuff
***************************************

The module contains some recently added functionality that has not been stabilized, or functionality that is considered optional.

.. toctree::
    :maxdepth: 2

    stereo
    FaceRecognizer Documentation <facerec/index>
    openfabmap
@ -1,107 +0,0 @@
ColorMaps in OpenCV
===================

applyColorMap
---------------------

Applies a GNU Octave/MATLAB equivalent colormap on a given image.

.. ocv:function:: void applyColorMap(InputArray src, OutputArray dst, int colormap)

    :param src: The source image; grayscale or colored does not matter.
    :param dst: The result is the colormapped source image. Note: :ocv:func:`Mat::create` is called on dst.
    :param colormap: The colormap to apply, see the list of available colormaps below.

Currently the following GNU Octave/MATLAB equivalent colormaps are implemented:

.. code-block:: cpp

    enum
    {
        COLORMAP_AUTUMN = 0,
        COLORMAP_BONE = 1,
        COLORMAP_JET = 2,
        COLORMAP_WINTER = 3,
        COLORMAP_RAINBOW = 4,
        COLORMAP_OCEAN = 5,
        COLORMAP_SUMMER = 6,
        COLORMAP_SPRING = 7,
        COLORMAP_COOL = 8,
        COLORMAP_HSV = 9,
        COLORMAP_PINK = 10,
        COLORMAP_HOT = 11
    }


Description
-----------

Human perception isn't built for observing fine changes in grayscale images. Human eyes are more sensitive to changes between colors, so you often need to recolor your grayscale images to get a clue about them. OpenCV now comes with various colormaps to enhance the visualization in your computer vision application.

In OpenCV 2.4 you only need :ocv:func:`applyColorMap` to apply a colormap on a given image. The following sample code reads the path to an image from the command line, applies a Jet colormap on it and shows the result:

.. code-block:: cpp

    #include <opencv2/contrib.hpp>
    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>

    using namespace cv;

    int main(int argc, const char *argv[]) {
        // Get the path to the image, if one was given on the command line:
        String filename;
        if (argc > 1) {
            filename = String(argv[1]);
        }
        // The following lines show how to apply a colormap on a given image
        // and display it with cv::imshow. An exception is thrown if the
        // path to the image is invalid.
        if (!filename.empty()) {
            Mat img0 = imread(filename);
            // Throw an exception, if the image can't be read:
            if (img0.empty()) {
                CV_Error(CV_StsBadArg, "Sample image is empty. Please adjust your path, so it points to a valid input image!");
            }
            // Holds the colormap version of the image:
            Mat cm_img0;
            // Apply the colormap:
            applyColorMap(img0, cm_img0, COLORMAP_JET);
            // Show the result:
            imshow("cm_img0", cm_img0);
            waitKey(0);
        }

        return 0;
    }

And here are the color scales for each of the available colormaps:

+-----------------------+---------------------------------------------------+
| Class                 | Scale                                             |
+=======================+===================================================+
| COLORMAP_AUTUMN       | .. image:: img/colormaps/colorscale_autumn.jpg    |
+-----------------------+---------------------------------------------------+
| COLORMAP_BONE         | .. image:: img/colormaps/colorscale_bone.jpg      |
+-----------------------+---------------------------------------------------+
| COLORMAP_COOL         | .. image:: img/colormaps/colorscale_cool.jpg      |
+-----------------------+---------------------------------------------------+
| COLORMAP_HOT          | .. image:: img/colormaps/colorscale_hot.jpg       |
+-----------------------+---------------------------------------------------+
| COLORMAP_HSV          | .. image:: img/colormaps/colorscale_hsv.jpg       |
+-----------------------+---------------------------------------------------+
| COLORMAP_JET          | .. image:: img/colormaps/colorscale_jet.jpg       |
+-----------------------+---------------------------------------------------+
| COLORMAP_OCEAN        | .. image:: img/colormaps/colorscale_ocean.jpg     |
+-----------------------+---------------------------------------------------+
| COLORMAP_PINK         | .. image:: img/colormaps/colorscale_pink.jpg      |
+-----------------------+---------------------------------------------------+
| COLORMAP_RAINBOW      | .. image:: img/colormaps/colorscale_rainbow.jpg   |
+-----------------------+---------------------------------------------------+
| COLORMAP_SPRING       | .. image:: img/colormaps/colorscale_spring.jpg    |
+-----------------------+---------------------------------------------------+
| COLORMAP_SUMMER       | .. image:: img/colormaps/colorscale_summer.jpg    |
+-----------------------+---------------------------------------------------+
| COLORMAP_WINTER       | .. image:: img/colormaps/colorscale_winter.jpg    |
+-----------------------+---------------------------------------------------+
@ -1,400 +0,0 @@
/home/philipp/facerec/data/at/s13/2.pgm;12
/home/philipp/facerec/data/at/s13/7.pgm;12
/home/philipp/facerec/data/at/s13/6.pgm;12
/home/philipp/facerec/data/at/s13/9.pgm;12
/home/philipp/facerec/data/at/s13/5.pgm;12
/home/philipp/facerec/data/at/s13/3.pgm;12
/home/philipp/facerec/data/at/s13/4.pgm;12
/home/philipp/facerec/data/at/s13/10.pgm;12
/home/philipp/facerec/data/at/s13/8.pgm;12
/home/philipp/facerec/data/at/s13/1.pgm;12
/home/philipp/facerec/data/at/s17/2.pgm;16
/home/philipp/facerec/data/at/s17/7.pgm;16
/home/philipp/facerec/data/at/s17/6.pgm;16
/home/philipp/facerec/data/at/s17/9.pgm;16
/home/philipp/facerec/data/at/s17/5.pgm;16
/home/philipp/facerec/data/at/s17/3.pgm;16
/home/philipp/facerec/data/at/s17/4.pgm;16
/home/philipp/facerec/data/at/s17/10.pgm;16
/home/philipp/facerec/data/at/s17/8.pgm;16
/home/philipp/facerec/data/at/s17/1.pgm;16
/home/philipp/facerec/data/at/s32/2.pgm;31
/home/philipp/facerec/data/at/s32/7.pgm;31
/home/philipp/facerec/data/at/s32/6.pgm;31
/home/philipp/facerec/data/at/s32/9.pgm;31
/home/philipp/facerec/data/at/s32/5.pgm;31
/home/philipp/facerec/data/at/s32/3.pgm;31
/home/philipp/facerec/data/at/s32/4.pgm;31
/home/philipp/facerec/data/at/s32/10.pgm;31
/home/philipp/facerec/data/at/s32/8.pgm;31
/home/philipp/facerec/data/at/s32/1.pgm;31
/home/philipp/facerec/data/at/s10/2.pgm;9
/home/philipp/facerec/data/at/s10/7.pgm;9
/home/philipp/facerec/data/at/s10/6.pgm;9
/home/philipp/facerec/data/at/s10/9.pgm;9
/home/philipp/facerec/data/at/s10/5.pgm;9
/home/philipp/facerec/data/at/s10/3.pgm;9
/home/philipp/facerec/data/at/s10/4.pgm;9
/home/philipp/facerec/data/at/s10/10.pgm;9
/home/philipp/facerec/data/at/s10/8.pgm;9
/home/philipp/facerec/data/at/s10/1.pgm;9
/home/philipp/facerec/data/at/s27/2.pgm;26
/home/philipp/facerec/data/at/s27/7.pgm;26
/home/philipp/facerec/data/at/s27/6.pgm;26
/home/philipp/facerec/data/at/s27/9.pgm;26
/home/philipp/facerec/data/at/s27/5.pgm;26
/home/philipp/facerec/data/at/s27/3.pgm;26
/home/philipp/facerec/data/at/s27/4.pgm;26
/home/philipp/facerec/data/at/s27/10.pgm;26
/home/philipp/facerec/data/at/s27/8.pgm;26
/home/philipp/facerec/data/at/s27/1.pgm;26
/home/philipp/facerec/data/at/s5/2.pgm;4
/home/philipp/facerec/data/at/s5/7.pgm;4
/home/philipp/facerec/data/at/s5/6.pgm;4
/home/philipp/facerec/data/at/s5/9.pgm;4
/home/philipp/facerec/data/at/s5/5.pgm;4
/home/philipp/facerec/data/at/s5/3.pgm;4
/home/philipp/facerec/data/at/s5/4.pgm;4
/home/philipp/facerec/data/at/s5/10.pgm;4
/home/philipp/facerec/data/at/s5/8.pgm;4
/home/philipp/facerec/data/at/s5/1.pgm;4
/home/philipp/facerec/data/at/s20/2.pgm;19
/home/philipp/facerec/data/at/s20/7.pgm;19
/home/philipp/facerec/data/at/s20/6.pgm;19
/home/philipp/facerec/data/at/s20/9.pgm;19
/home/philipp/facerec/data/at/s20/5.pgm;19
/home/philipp/facerec/data/at/s20/3.pgm;19
/home/philipp/facerec/data/at/s20/4.pgm;19
/home/philipp/facerec/data/at/s20/10.pgm;19
/home/philipp/facerec/data/at/s20/8.pgm;19
/home/philipp/facerec/data/at/s20/1.pgm;19
/home/philipp/facerec/data/at/s30/2.pgm;29
/home/philipp/facerec/data/at/s30/7.pgm;29
/home/philipp/facerec/data/at/s30/6.pgm;29
/home/philipp/facerec/data/at/s30/9.pgm;29
/home/philipp/facerec/data/at/s30/5.pgm;29
/home/philipp/facerec/data/at/s30/3.pgm;29
/home/philipp/facerec/data/at/s30/4.pgm;29
/home/philipp/facerec/data/at/s30/10.pgm;29
/home/philipp/facerec/data/at/s30/8.pgm;29
/home/philipp/facerec/data/at/s30/1.pgm;29
/home/philipp/facerec/data/at/s39/2.pgm;38
/home/philipp/facerec/data/at/s39/7.pgm;38
/home/philipp/facerec/data/at/s39/6.pgm;38
/home/philipp/facerec/data/at/s39/9.pgm;38
/home/philipp/facerec/data/at/s39/5.pgm;38
/home/philipp/facerec/data/at/s39/3.pgm;38
/home/philipp/facerec/data/at/s39/4.pgm;38
/home/philipp/facerec/data/at/s39/10.pgm;38
/home/philipp/facerec/data/at/s39/8.pgm;38
/home/philipp/facerec/data/at/s39/1.pgm;38
/home/philipp/facerec/data/at/s35/2.pgm;34
/home/philipp/facerec/data/at/s35/7.pgm;34
/home/philipp/facerec/data/at/s35/6.pgm;34
/home/philipp/facerec/data/at/s35/9.pgm;34
/home/philipp/facerec/data/at/s35/5.pgm;34
/home/philipp/facerec/data/at/s35/3.pgm;34
/home/philipp/facerec/data/at/s35/4.pgm;34
/home/philipp/facerec/data/at/s35/10.pgm;34
/home/philipp/facerec/data/at/s35/8.pgm;34
/home/philipp/facerec/data/at/s35/1.pgm;34
/home/philipp/facerec/data/at/s23/2.pgm;22
/home/philipp/facerec/data/at/s23/7.pgm;22
/home/philipp/facerec/data/at/s23/6.pgm;22
/home/philipp/facerec/data/at/s23/9.pgm;22
/home/philipp/facerec/data/at/s23/5.pgm;22
/home/philipp/facerec/data/at/s23/3.pgm;22
/home/philipp/facerec/data/at/s23/4.pgm;22
/home/philipp/facerec/data/at/s23/10.pgm;22
/home/philipp/facerec/data/at/s23/8.pgm;22
/home/philipp/facerec/data/at/s23/1.pgm;22
/home/philipp/facerec/data/at/s4/2.pgm;3
/home/philipp/facerec/data/at/s4/7.pgm;3
/home/philipp/facerec/data/at/s4/6.pgm;3
/home/philipp/facerec/data/at/s4/9.pgm;3
/home/philipp/facerec/data/at/s4/5.pgm;3
/home/philipp/facerec/data/at/s4/3.pgm;3
/home/philipp/facerec/data/at/s4/4.pgm;3
/home/philipp/facerec/data/at/s4/10.pgm;3
/home/philipp/facerec/data/at/s4/8.pgm;3
/home/philipp/facerec/data/at/s4/1.pgm;3
/home/philipp/facerec/data/at/s9/2.pgm;8
/home/philipp/facerec/data/at/s9/7.pgm;8
/home/philipp/facerec/data/at/s9/6.pgm;8
/home/philipp/facerec/data/at/s9/9.pgm;8
/home/philipp/facerec/data/at/s9/5.pgm;8
/home/philipp/facerec/data/at/s9/3.pgm;8
/home/philipp/facerec/data/at/s9/4.pgm;8
/home/philipp/facerec/data/at/s9/10.pgm;8
/home/philipp/facerec/data/at/s9/8.pgm;8
/home/philipp/facerec/data/at/s9/1.pgm;8
/home/philipp/facerec/data/at/s37/2.pgm;36
/home/philipp/facerec/data/at/s37/7.pgm;36
/home/philipp/facerec/data/at/s37/6.pgm;36
/home/philipp/facerec/data/at/s37/9.pgm;36
/home/philipp/facerec/data/at/s37/5.pgm;36
/home/philipp/facerec/data/at/s37/3.pgm;36
/home/philipp/facerec/data/at/s37/4.pgm;36
/home/philipp/facerec/data/at/s37/10.pgm;36
/home/philipp/facerec/data/at/s37/8.pgm;36
/home/philipp/facerec/data/at/s37/1.pgm;36
/home/philipp/facerec/data/at/s24/2.pgm;23
/home/philipp/facerec/data/at/s24/7.pgm;23
/home/philipp/facerec/data/at/s24/6.pgm;23
/home/philipp/facerec/data/at/s24/9.pgm;23
/home/philipp/facerec/data/at/s24/5.pgm;23
/home/philipp/facerec/data/at/s24/3.pgm;23
/home/philipp/facerec/data/at/s24/4.pgm;23
/home/philipp/facerec/data/at/s24/10.pgm;23
/home/philipp/facerec/data/at/s24/8.pgm;23
/home/philipp/facerec/data/at/s24/1.pgm;23
/home/philipp/facerec/data/at/s19/2.pgm;18
/home/philipp/facerec/data/at/s19/7.pgm;18
/home/philipp/facerec/data/at/s19/6.pgm;18
/home/philipp/facerec/data/at/s19/9.pgm;18
/home/philipp/facerec/data/at/s19/5.pgm;18
/home/philipp/facerec/data/at/s19/3.pgm;18
/home/philipp/facerec/data/at/s19/4.pgm;18
/home/philipp/facerec/data/at/s19/10.pgm;18
/home/philipp/facerec/data/at/s19/8.pgm;18
/home/philipp/facerec/data/at/s19/1.pgm;18
/home/philipp/facerec/data/at/s8/2.pgm;7
/home/philipp/facerec/data/at/s8/7.pgm;7
/home/philipp/facerec/data/at/s8/6.pgm;7
/home/philipp/facerec/data/at/s8/9.pgm;7
/home/philipp/facerec/data/at/s8/5.pgm;7
/home/philipp/facerec/data/at/s8/3.pgm;7
/home/philipp/facerec/data/at/s8/4.pgm;7
/home/philipp/facerec/data/at/s8/10.pgm;7
/home/philipp/facerec/data/at/s8/8.pgm;7
/home/philipp/facerec/data/at/s8/1.pgm;7
/home/philipp/facerec/data/at/s21/2.pgm;20
/home/philipp/facerec/data/at/s21/7.pgm;20
/home/philipp/facerec/data/at/s21/6.pgm;20
/home/philipp/facerec/data/at/s21/9.pgm;20
/home/philipp/facerec/data/at/s21/5.pgm;20
/home/philipp/facerec/data/at/s21/3.pgm;20
/home/philipp/facerec/data/at/s21/4.pgm;20
/home/philipp/facerec/data/at/s21/10.pgm;20
/home/philipp/facerec/data/at/s21/8.pgm;20
/home/philipp/facerec/data/at/s21/1.pgm;20
/home/philipp/facerec/data/at/s1/2.pgm;0
/home/philipp/facerec/data/at/s1/7.pgm;0
/home/philipp/facerec/data/at/s1/6.pgm;0
/home/philipp/facerec/data/at/s1/9.pgm;0
/home/philipp/facerec/data/at/s1/5.pgm;0
/home/philipp/facerec/data/at/s1/3.pgm;0
/home/philipp/facerec/data/at/s1/4.pgm;0
/home/philipp/facerec/data/at/s1/10.pgm;0
/home/philipp/facerec/data/at/s1/8.pgm;0
/home/philipp/facerec/data/at/s1/1.pgm;0
/home/philipp/facerec/data/at/s7/2.pgm;6
/home/philipp/facerec/data/at/s7/7.pgm;6
/home/philipp/facerec/data/at/s7/6.pgm;6
/home/philipp/facerec/data/at/s7/9.pgm;6
/home/philipp/facerec/data/at/s7/5.pgm;6
/home/philipp/facerec/data/at/s7/3.pgm;6
/home/philipp/facerec/data/at/s7/4.pgm;6
/home/philipp/facerec/data/at/s7/10.pgm;6
/home/philipp/facerec/data/at/s7/8.pgm;6
/home/philipp/facerec/data/at/s7/1.pgm;6
/home/philipp/facerec/data/at/s16/2.pgm;15
/home/philipp/facerec/data/at/s16/7.pgm;15
/home/philipp/facerec/data/at/s16/6.pgm;15
/home/philipp/facerec/data/at/s16/9.pgm;15
/home/philipp/facerec/data/at/s16/5.pgm;15
/home/philipp/facerec/data/at/s16/3.pgm;15
/home/philipp/facerec/data/at/s16/4.pgm;15
/home/philipp/facerec/data/at/s16/10.pgm;15
/home/philipp/facerec/data/at/s16/8.pgm;15
/home/philipp/facerec/data/at/s16/1.pgm;15
/home/philipp/facerec/data/at/s36/2.pgm;35
/home/philipp/facerec/data/at/s36/7.pgm;35
/home/philipp/facerec/data/at/s36/6.pgm;35
/home/philipp/facerec/data/at/s36/9.pgm;35
/home/philipp/facerec/data/at/s36/5.pgm;35
/home/philipp/facerec/data/at/s36/3.pgm;35
/home/philipp/facerec/data/at/s36/4.pgm;35
/home/philipp/facerec/data/at/s36/10.pgm;35
/home/philipp/facerec/data/at/s36/8.pgm;35
/home/philipp/facerec/data/at/s36/1.pgm;35
/home/philipp/facerec/data/at/s25/2.pgm;24
/home/philipp/facerec/data/at/s25/7.pgm;24
/home/philipp/facerec/data/at/s25/6.pgm;24
/home/philipp/facerec/data/at/s25/9.pgm;24
/home/philipp/facerec/data/at/s25/5.pgm;24
/home/philipp/facerec/data/at/s25/3.pgm;24
/home/philipp/facerec/data/at/s25/4.pgm;24
/home/philipp/facerec/data/at/s25/10.pgm;24
/home/philipp/facerec/data/at/s25/8.pgm;24
/home/philipp/facerec/data/at/s25/1.pgm;24
/home/philipp/facerec/data/at/s14/2.pgm;13
/home/philipp/facerec/data/at/s14/7.pgm;13
/home/philipp/facerec/data/at/s14/6.pgm;13
/home/philipp/facerec/data/at/s14/9.pgm;13
/home/philipp/facerec/data/at/s14/5.pgm;13
/home/philipp/facerec/data/at/s14/3.pgm;13
/home/philipp/facerec/data/at/s14/4.pgm;13
/home/philipp/facerec/data/at/s14/10.pgm;13
/home/philipp/facerec/data/at/s14/8.pgm;13
/home/philipp/facerec/data/at/s14/1.pgm;13
/home/philipp/facerec/data/at/s34/2.pgm;33
/home/philipp/facerec/data/at/s34/7.pgm;33
/home/philipp/facerec/data/at/s34/6.pgm;33
/home/philipp/facerec/data/at/s34/9.pgm;33
/home/philipp/facerec/data/at/s34/5.pgm;33
/home/philipp/facerec/data/at/s34/3.pgm;33
/home/philipp/facerec/data/at/s34/4.pgm;33
/home/philipp/facerec/data/at/s34/10.pgm;33
/home/philipp/facerec/data/at/s34/8.pgm;33
/home/philipp/facerec/data/at/s34/1.pgm;33
/home/philipp/facerec/data/at/s11/2.pgm;10
/home/philipp/facerec/data/at/s11/7.pgm;10
/home/philipp/facerec/data/at/s11/6.pgm;10
/home/philipp/facerec/data/at/s11/9.pgm;10
/home/philipp/facerec/data/at/s11/5.pgm;10
/home/philipp/facerec/data/at/s11/3.pgm;10
/home/philipp/facerec/data/at/s11/4.pgm;10
/home/philipp/facerec/data/at/s11/10.pgm;10
/home/philipp/facerec/data/at/s11/8.pgm;10
/home/philipp/facerec/data/at/s11/1.pgm;10
/home/philipp/facerec/data/at/s26/2.pgm;25
/home/philipp/facerec/data/at/s26/7.pgm;25
/home/philipp/facerec/data/at/s26/6.pgm;25
/home/philipp/facerec/data/at/s26/9.pgm;25
/home/philipp/facerec/data/at/s26/5.pgm;25
/home/philipp/facerec/data/at/s26/3.pgm;25
/home/philipp/facerec/data/at/s26/4.pgm;25
/home/philipp/facerec/data/at/s26/10.pgm;25
/home/philipp/facerec/data/at/s26/8.pgm;25
/home/philipp/facerec/data/at/s26/1.pgm;25
/home/philipp/facerec/data/at/s18/2.pgm;17
/home/philipp/facerec/data/at/s18/7.pgm;17
/home/philipp/facerec/data/at/s18/6.pgm;17
/home/philipp/facerec/data/at/s18/9.pgm;17
/home/philipp/facerec/data/at/s18/5.pgm;17
/home/philipp/facerec/data/at/s18/3.pgm;17
/home/philipp/facerec/data/at/s18/4.pgm;17
/home/philipp/facerec/data/at/s18/10.pgm;17
/home/philipp/facerec/data/at/s18/8.pgm;17
/home/philipp/facerec/data/at/s18/1.pgm;17
/home/philipp/facerec/data/at/s29/2.pgm;28
/home/philipp/facerec/data/at/s29/7.pgm;28
/home/philipp/facerec/data/at/s29/6.pgm;28
/home/philipp/facerec/data/at/s29/9.pgm;28
/home/philipp/facerec/data/at/s29/5.pgm;28
/home/philipp/facerec/data/at/s29/3.pgm;28
/home/philipp/facerec/data/at/s29/4.pgm;28
/home/philipp/facerec/data/at/s29/10.pgm;28
/home/philipp/facerec/data/at/s29/8.pgm;28
/home/philipp/facerec/data/at/s29/1.pgm;28
/home/philipp/facerec/data/at/s33/2.pgm;32
/home/philipp/facerec/data/at/s33/7.pgm;32
/home/philipp/facerec/data/at/s33/6.pgm;32
/home/philipp/facerec/data/at/s33/9.pgm;32
/home/philipp/facerec/data/at/s33/5.pgm;32
/home/philipp/facerec/data/at/s33/3.pgm;32
/home/philipp/facerec/data/at/s33/4.pgm;32
/home/philipp/facerec/data/at/s33/10.pgm;32
/home/philipp/facerec/data/at/s33/8.pgm;32
/home/philipp/facerec/data/at/s33/1.pgm;32
/home/philipp/facerec/data/at/s12/2.pgm;11
/home/philipp/facerec/data/at/s12/7.pgm;11
/home/philipp/facerec/data/at/s12/6.pgm;11
/home/philipp/facerec/data/at/s12/9.pgm;11
/home/philipp/facerec/data/at/s12/5.pgm;11
/home/philipp/facerec/data/at/s12/3.pgm;11
/home/philipp/facerec/data/at/s12/4.pgm;11
/home/philipp/facerec/data/at/s12/10.pgm;11
/home/philipp/facerec/data/at/s12/8.pgm;11
/home/philipp/facerec/data/at/s12/1.pgm;11
/home/philipp/facerec/data/at/s6/2.pgm;5
/home/philipp/facerec/data/at/s6/7.pgm;5
/home/philipp/facerec/data/at/s6/6.pgm;5
/home/philipp/facerec/data/at/s6/9.pgm;5
/home/philipp/facerec/data/at/s6/5.pgm;5
/home/philipp/facerec/data/at/s6/3.pgm;5
/home/philipp/facerec/data/at/s6/4.pgm;5
/home/philipp/facerec/data/at/s6/10.pgm;5
/home/philipp/facerec/data/at/s6/8.pgm;5
/home/philipp/facerec/data/at/s6/1.pgm;5
/home/philipp/facerec/data/at/s22/2.pgm;21
/home/philipp/facerec/data/at/s22/7.pgm;21
/home/philipp/facerec/data/at/s22/6.pgm;21
/home/philipp/facerec/data/at/s22/9.pgm;21
/home/philipp/facerec/data/at/s22/5.pgm;21
/home/philipp/facerec/data/at/s22/3.pgm;21
/home/philipp/facerec/data/at/s22/4.pgm;21
/home/philipp/facerec/data/at/s22/10.pgm;21
/home/philipp/facerec/data/at/s22/8.pgm;21
/home/philipp/facerec/data/at/s22/1.pgm;21
/home/philipp/facerec/data/at/s15/2.pgm;14
/home/philipp/facerec/data/at/s15/7.pgm;14
/home/philipp/facerec/data/at/s15/6.pgm;14
/home/philipp/facerec/data/at/s15/9.pgm;14
/home/philipp/facerec/data/at/s15/5.pgm;14
/home/philipp/facerec/data/at/s15/3.pgm;14
/home/philipp/facerec/data/at/s15/4.pgm;14
/home/philipp/facerec/data/at/s15/10.pgm;14
/home/philipp/facerec/data/at/s15/8.pgm;14
/home/philipp/facerec/data/at/s15/1.pgm;14
/home/philipp/facerec/data/at/s2/2.pgm;1
/home/philipp/facerec/data/at/s2/7.pgm;1
/home/philipp/facerec/data/at/s2/6.pgm;1
/home/philipp/facerec/data/at/s2/9.pgm;1
/home/philipp/facerec/data/at/s2/5.pgm;1
/home/philipp/facerec/data/at/s2/3.pgm;1
/home/philipp/facerec/data/at/s2/4.pgm;1
/home/philipp/facerec/data/at/s2/10.pgm;1
/home/philipp/facerec/data/at/s2/8.pgm;1
/home/philipp/facerec/data/at/s2/1.pgm;1
/home/philipp/facerec/data/at/s31/2.pgm;30
/home/philipp/facerec/data/at/s31/7.pgm;30
/home/philipp/facerec/data/at/s31/6.pgm;30
/home/philipp/facerec/data/at/s31/9.pgm;30
/home/philipp/facerec/data/at/s31/5.pgm;30
/home/philipp/facerec/data/at/s31/3.pgm;30
/home/philipp/facerec/data/at/s31/4.pgm;30
/home/philipp/facerec/data/at/s31/10.pgm;30
/home/philipp/facerec/data/at/s31/8.pgm;30
/home/philipp/facerec/data/at/s31/1.pgm;30
/home/philipp/facerec/data/at/s28/2.pgm;27
/home/philipp/facerec/data/at/s28/7.pgm;27
/home/philipp/facerec/data/at/s28/6.pgm;27
/home/philipp/facerec/data/at/s28/9.pgm;27
/home/philipp/facerec/data/at/s28/5.pgm;27
/home/philipp/facerec/data/at/s28/3.pgm;27
/home/philipp/facerec/data/at/s28/4.pgm;27
/home/philipp/facerec/data/at/s28/10.pgm;27
/home/philipp/facerec/data/at/s28/8.pgm;27
/home/philipp/facerec/data/at/s28/1.pgm;27
/home/philipp/facerec/data/at/s40/2.pgm;39
/home/philipp/facerec/data/at/s40/7.pgm;39
/home/philipp/facerec/data/at/s40/6.pgm;39
/home/philipp/facerec/data/at/s40/9.pgm;39
/home/philipp/facerec/data/at/s40/5.pgm;39
/home/philipp/facerec/data/at/s40/3.pgm;39
/home/philipp/facerec/data/at/s40/4.pgm;39
/home/philipp/facerec/data/at/s40/10.pgm;39
/home/philipp/facerec/data/at/s40/8.pgm;39
/home/philipp/facerec/data/at/s40/1.pgm;39
/home/philipp/facerec/data/at/s3/2.pgm;2
/home/philipp/facerec/data/at/s3/7.pgm;2
/home/philipp/facerec/data/at/s3/6.pgm;2
/home/philipp/facerec/data/at/s3/9.pgm;2
/home/philipp/facerec/data/at/s3/5.pgm;2
/home/philipp/facerec/data/at/s3/3.pgm;2
/home/philipp/facerec/data/at/s3/4.pgm;2
/home/philipp/facerec/data/at/s3/10.pgm;2
/home/philipp/facerec/data/at/s3/8.pgm;2
/home/philipp/facerec/data/at/s3/1.pgm;2
/home/philipp/facerec/data/at/s38/2.pgm;37
/home/philipp/facerec/data/at/s38/7.pgm;37
/home/philipp/facerec/data/at/s38/6.pgm;37
/home/philipp/facerec/data/at/s38/9.pgm;37
/home/philipp/facerec/data/at/s38/5.pgm;37
/home/philipp/facerec/data/at/s38/3.pgm;37
/home/philipp/facerec/data/at/s38/4.pgm;37
/home/philipp/facerec/data/at/s38/10.pgm;37
/home/philipp/facerec/data/at/s38/8.pgm;37
/home/philipp/facerec/data/at/s38/1.pgm;37
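
(Each line of the CSV file above is a ``<path>;<label>`` pair. For reference, a minimal reader for this format could look like the sketch below; it is modeled on the ``read_csv`` helper used by the facerec samples, and the helper name and default separator are assumptions, not part of this diff:)

.. code-block:: cpp

    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>
    #include <cstdlib>
    #include <fstream>
    #include <sstream>
    #include <string>
    #include <vector>

    using namespace cv;

    // Parse a "<path>;<label>" CSV file into parallel image/label vectors:
    static void read_csv(const std::string& filename, std::vector<Mat>& images,
                         std::vector<int>& labels, char separator = ';')
    {
        std::ifstream file(filename.c_str(), std::ifstream::in);
        if (!file)
            CV_Error(CV_StsBadArg, "No valid input file was given.");
        std::string line, path, classlabel;
        while (std::getline(file, line))
        {
            std::stringstream liness(line);
            std::getline(liness, path, separator);
            std::getline(liness, classlabel);
            if (!path.empty() && !classlabel.empty())
            {
                images.push_back(imread(path, CV_LOAD_IMAGE_GRAYSCALE));
                labels.push_back(atoi(classlabel.c_str()));
            }
        }
    }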

@ -1,377 +0,0 @@
FaceRecognizer
==============

.. highlight:: cpp

.. Sample code::

   * An example using the FaceRecognizer class can be found at opencv_source_code/samples/cpp/facerec_demo.cpp

   * (Python) An example using the FaceRecognizer class can be found at opencv_source_code/samples/python2/facerec_demo.py

FaceRecognizer
--------------

.. ocv:class:: FaceRecognizer : public Algorithm

All face recognition models in OpenCV are derived from the abstract base class :ocv:class:`FaceRecognizer`, which provides
unified access to all face recognition algorithms in OpenCV. ::

    class FaceRecognizer : public Algorithm
    {
    public:
        //! virtual destructor
        virtual ~FaceRecognizer() {}

        // Trains a FaceRecognizer.
        virtual void train(InputArrayOfArrays src, InputArray labels) = 0;

        // Updates a FaceRecognizer.
        virtual void update(InputArrayOfArrays src, InputArray labels);

        // Gets a prediction from a FaceRecognizer.
        virtual int predict(InputArray src) const = 0;

        // Predicts the label and confidence for a given sample.
        virtual void predict(InputArray src, int &label, double &confidence) const = 0;

        // Serializes this object to a given filename.
        virtual void save(const String& filename) const;

        // Deserializes this object from a given filename.
        virtual void load(const String& filename);

        // Serializes this object to a given cv::FileStorage.
        virtual void save(FileStorage& fs) const = 0;

        // Deserializes this object from a given cv::FileStorage.
        virtual void load(const FileStorage& fs) = 0;
    };


Description
+++++++++++

I'll go a bit more into detail explaining :ocv:class:`FaceRecognizer`, because it doesn't look like a powerful interface at first sight. But: Every :ocv:class:`FaceRecognizer` is an :ocv:class:`Algorithm`, so you can easily get/set all model internals (if allowed by the implementation). :ocv:class:`Algorithm` is a relatively new OpenCV concept, available since the 2.4 release. I suggest you take a look at its description.

:ocv:class:`Algorithm` provides the following features for all derived classes:

* A so-called “virtual constructor”. That is, each Algorithm derivative is registered at program start, so you can get the list of registered algorithms and create an instance of a particular algorithm by its name (see :ocv:func:`Algorithm::create`). If you plan to add your own algorithms, it is good practice to add a unique prefix to their names to distinguish them from other algorithms. A minimal sketch of this mechanism is shown below.

* Setting/Retrieving algorithm parameters by name. If you have used the video capturing functionality from the OpenCV highgui module, you are probably familiar with :ocv:cfunc:`cvSetCaptureProperty`, :ocv:cfunc:`cvGetCaptureProperty`, :ocv:func:`VideoCapture::set` and :ocv:func:`VideoCapture::get`. :ocv:class:`Algorithm` provides a similar mechanism, where instead of integer ids you specify the parameter names as text strings. See :ocv:func:`Algorithm::set` and :ocv:func:`Algorithm::get` for details.

* Reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store all its parameters and then read them back. There is no need to re-implement it each time.
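
To make the “virtual constructor” idea concrete, here is a minimal sketch. It assumes the recognizers are registered under the names OpenCV's own error messages report (e.g. ``FaceRecognizer.Eigenfaces``, as seen in the update example further below); treat the exact names and the ``vector<String>`` element type as assumptions to verify against your OpenCV version:

.. code-block:: cpp

    #include <opencv2/core.hpp>
    #include <opencv2/contrib.hpp>
    #include <iostream>
    #include <vector>

    using namespace cv;

    int main() {
        // List every algorithm registered at program start:
        std::vector<String> names;
        Algorithm::getList(names);
        for (size_t i = 0; i < names.size(); i++)
            std::cout << names[i] << std::endl;
        // Create a FaceRecognizer through the "virtual constructor",
        // i.e. by its registered name instead of a factory function:
        Ptr<FaceRecognizer> model = Algorithm::create<FaceRecognizer>("FaceRecognizer.Eigenfaces");
        return 0;
    }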

Moreover every :ocv:class:`FaceRecognizer` supports the following:

* **Training** of a :ocv:class:`FaceRecognizer` with :ocv:func:`FaceRecognizer::train` on a given set of images (your face database!).

* **Prediction** for a given sample image, that is, a face. The image is given as a :ocv:class:`Mat`. A minimal end-to-end sketch combining all three capabilities follows below.

* **Loading/Saving** the model state from/to a given XML or YAML file.

.. note:: When using the FaceRecognizer interface in combination with Python, please stick to Python 2. Some underlying scripts, like create_csv, will not work in other versions, like Python 3.
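
A minimal end-to-end sketch of those three capabilities (assuming ``images``, ``labels`` and a query image ``testSample`` have been prepared as in the training example further below; the output filename is a placeholder):

.. code-block:: cpp

    // Train, predict and persist through one and the same interface:
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(images, labels);                  // training
    int predicted = model->predict(testSample);    // prediction
    model->save("eigenfaces_at.yml");              // saving the model state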

Setting the Thresholds
+++++++++++++++++++++++

Sometimes you run into a situation where you want to apply a threshold on the prediction. A common scenario in face recognition is to tell whether a face belongs to the training dataset or if it is unknown. You might wonder why there's no public API in :ocv:class:`FaceRecognizer` to set the threshold for the prediction, but rest assured: it's supported. It just means there's no generic way in an abstract class to provide an interface for setting/getting the thresholds of *every possible* :ocv:class:`FaceRecognizer` algorithm. The appropriate place to set the thresholds is in the constructor of the specific :ocv:class:`FaceRecognizer`, and since every :ocv:class:`FaceRecognizer` is an :ocv:class:`Algorithm` (see above), you can get/set the thresholds at runtime!

Here is an example of setting a threshold for the Eigenfaces method when creating the model:

.. code-block:: cpp

    // Let's say we want to keep 10 Eigenfaces and have a threshold value of 10.0
    int num_components = 10;
    double threshold = 10.0;
    // Then if you want to have a cv::FaceRecognizer with a confidence threshold,
    // create the concrete implementation with the appropriate parameters:
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer(num_components, threshold);

Sometimes it's impractical to retrain the model just to experiment with threshold values. Thanks to :ocv:class:`Algorithm` it's possible to set internal model thresholds at runtime. Let's see how we would set/get the prediction threshold for the Eigenfaces model we've created above:

.. code-block:: cpp

    // The following line reads the threshold from the Eigenfaces model:
    double current_threshold = model->getDouble("threshold");
    // And this line sets the threshold to 0.0:
    model->set("threshold", 0.0);

If you've set the threshold to ``0.0`` as we did above, then:

.. code-block:: cpp

    // Read in a sample image:
    Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    // Get a prediction from the model. Note: We've set a threshold of 0.0 above.
    // Since the distance is almost always larger than 0.0, you'll get -1 as
    // the label, which indicates this face is unknown:
    int predicted_label = model->predict(img);
    // ...

is going to yield ``-1`` as the predicted label, which states this face is unknown.

Getting the name of a FaceRecognizer
+++++++++++++++++++++++++++++++++++++

Since every :ocv:class:`FaceRecognizer` is an :ocv:class:`Algorithm`, you can use :ocv:func:`Algorithm::name` to get the name of a :ocv:class:`FaceRecognizer`:

.. code-block:: cpp

    // Create a FaceRecognizer:
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    // And here's how to get its name:
    String name = model->name();


FaceRecognizer::train
---------------------

Trains a FaceRecognizer with given data and associated labels.

.. ocv:function:: void FaceRecognizer::train( InputArrayOfArrays src, InputArray labels ) = 0

    :param src: The training images, that means the faces you want to learn. The data has to be given as a ``vector<Mat>``.

    :param labels: The labels corresponding to the images, which have to be given either as a ``vector<int>`` or a :ocv:class:`Mat` of type ``CV_32SC1``.

The following source code snippet shows you how to learn a Fisherfaces model on a given set of images. The images are read with :ocv:func:`imread` and pushed into a ``std::vector<Mat>``. The labels of each image are stored within a ``std::vector<int>`` (you could also use a :ocv:class:`Mat` of type ``CV_32SC1``). Think of the label as the subject (the person) this image belongs to, so same subjects (persons) should have the same label. For the available :ocv:class:`FaceRecognizer` implementations you don't have to pay any attention to the order of the labels, just make sure the same persons have the same label:

.. code-block:: cpp

    // holds images and labels
    vector<Mat> images;
    vector<int> labels;
    // images for first person
    images.push_back(imread("person0/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
    images.push_back(imread("person0/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
    images.push_back(imread("person0/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
    // images for second person
    images.push_back(imread("person1/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
    images.push_back(imread("person1/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
    images.push_back(imread("person1/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);

Now that you have read some images, we can create a new :ocv:class:`FaceRecognizer`. In this example I'll create a Fisherfaces model and decide to keep all of the possible Fisherfaces:

.. code-block:: cpp

    // Create a new Fisherfaces model and retain all available Fisherfaces,
    // this is the most common usage of this specific FaceRecognizer:
    //
    Ptr<FaceRecognizer> model = createFisherFaceRecognizer();

And finally train it on the given dataset (the face images and labels):

.. code-block:: cpp

    // This is the common interface to train all of the available cv::FaceRecognizer
    // implementations:
    //
    model->train(images, labels);

FaceRecognizer::update
----------------------

Updates a FaceRecognizer with given data and associated labels.

.. ocv:function:: void FaceRecognizer::update( InputArrayOfArrays src, InputArray labels )

    :param src: The training images, that means the faces you want to learn. The data has to be given as a ``vector<Mat>``.

    :param labels: The labels corresponding to the images, which have to be given either as a ``vector<int>`` or a :ocv:class:`Mat` of type ``CV_32SC1``.

This method updates a (probably trained) :ocv:class:`FaceRecognizer`, but only if the algorithm supports it. The Local Binary Patterns Histograms (LBPH) recognizer (see :ocv:func:`createLBPHFaceRecognizer`) can be updated. For the Eigenfaces and Fisherfaces method, this is algorithmically not possible and you have to re-estimate the model with :ocv:func:`FaceRecognizer::train`. In any case, a call to train empties the existing model and learns a new model, while update does not delete any model data.

.. code-block:: cpp

    // Create a new LBPH model (it can be updated) and use the default parameters,
    // this is the most common usage of this specific FaceRecognizer:
    //
    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();
    // This is the common interface to train all of the available cv::FaceRecognizer
    // implementations:
    //
    model->train(images, labels);
    // Some containers to hold new images and labels:
    vector<Mat> newImages;
    vector<int> newLabels;
    // You should add some images to the containers:
    //
    // ...
    //
    // Now updating the model is as easy as calling:
    model->update(newImages, newLabels);
    // This will preserve the old model data and extend the existing model
    // with the new features extracted from newImages!

Calling update on an Eigenfaces model (see :ocv:func:`createEigenFaceRecognizer`), which doesn't support updating, will throw an error similar to:

.. code-block:: none

    OpenCV Error: The function/feature is not implemented (This FaceRecognizer (FaceRecognizer.Eigenfaces) does not support updating, you have to use FaceRecognizer::train to update it.) in update, file /home/philipp/git/opencv/modules/contrib/src/facerec.cpp, line 305
    terminate called after throwing an instance of 'cv::Exception'

Please note: The :ocv:class:`FaceRecognizer` does not store your training images, because this would be very memory intensive and it's not the responsibility of the :ocv:class:`FaceRecognizer` to do so. The caller is responsible for maintaining the dataset they want to work with.

FaceRecognizer::predict
-----------------------

.. ocv:function:: int FaceRecognizer::predict( InputArray src ) const = 0
.. ocv:function:: void FaceRecognizer::predict( InputArray src, int & label, double & confidence ) const = 0

Predicts a label and associated confidence (e.g. distance) for a given input image.

    :param src: Sample image to get a prediction from.
    :param label: The predicted label for the given image.
    :param confidence: Associated confidence (e.g. distance) for the predicted label.

The suffix ``const`` means that prediction does not affect the internal model
state, so the method can be safely called from within different threads.

The following example shows how to get a prediction from a trained model:

.. code-block:: cpp

    using namespace cv;
    // Do your initialization here (create the cv::FaceRecognizer model) ...
    // ...
    // Read in a sample image:
    Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    // And get a prediction from the cv::FaceRecognizer:
    int predicted = model->predict(img);

Or to get a prediction and the associated confidence (e.g. distance):

.. code-block:: cpp

    using namespace cv;
    // Do your initialization here (create the cv::FaceRecognizer model) ...
    // ...
    Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    // Some variables for the predicted label and associated confidence (e.g. distance):
    int predicted_label = -1;
    double predicted_confidence = 0.0;
    // Get the prediction and associated confidence from the model:
    model->predict(img, predicted_label, predicted_confidence);

FaceRecognizer::save
--------------------

Saves a :ocv:class:`FaceRecognizer` and its model state.

.. ocv:function:: void FaceRecognizer::save(const String& filename) const

    Saves this model to a given filename, either as XML or YAML.

    :param filename: The filename to store this :ocv:class:`FaceRecognizer` to (either XML or YAML).

.. ocv:function:: void FaceRecognizer::save(FileStorage& fs) const

    Saves this model to a given :ocv:class:`FileStorage`.

    :param fs: The :ocv:class:`FileStorage` to store this :ocv:class:`FaceRecognizer` to.

Every :ocv:class:`FaceRecognizer` overwrites ``FaceRecognizer::save(FileStorage& fs)``
to save the internal model state. ``FaceRecognizer::save(const String& filename)`` saves
the state of a model to the given filename.

The suffix ``const`` means that saving does not affect the internal model
state, so the method can be safely called from within different threads.

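For example, persisting a trained model (the filenames here are just placeholders) is a one-liner per storage target:

.. code-block:: cpp

    // Save the trained model to YAML; the file extension selects the format:
    model->save("eigenfaces_at.yml");
    // Or write it into an already opened cv::FileStorage:
    FileStorage fs("eigenfaces_at.xml", FileStorage::WRITE);
    model->save(fs);
    fs.release();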

FaceRecognizer::load
--------------------

Loads a :ocv:class:`FaceRecognizer` and its model state.

.. ocv:function:: void FaceRecognizer::load( const String& filename )
.. ocv:function:: void FaceRecognizer::load( const FileStorage& fs ) = 0

Loads a persisted model and state from a given XML or YAML file. Every
:ocv:class:`FaceRecognizer` has to overwrite ``FaceRecognizer::load(FileStorage& fs)``
to enable loading the model state. ``FaceRecognizer::load(FileStorage& fs)`` in
turn gets called by ``FaceRecognizer::load(const String& filename)``, to ease
loading a model from a file.

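Restoring a model saved as above could then look like this (again, the filename is a placeholder):

.. code-block:: cpp

    // Create an (untrained) Eigenfaces model and restore the persisted state:
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->load("eigenfaces_at.yml");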

createEigenFaceRecognizer
-------------------------

.. ocv:function:: Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX)

    :param num_components: The number of components (read: Eigenfaces) kept for this Principal Component Analysis. As a hint: There's no rule how many components (read: Eigenfaces) should be kept for good reconstruction capabilities. It depends on your input data, so experiment with the number. Keeping 80 components should almost always be sufficient.

    :param threshold: The threshold applied in the prediction. If the distance to the nearest neighbor is larger than the threshold, this method returns -1.

Notes:
++++++

* Training and prediction must be done on grayscale images, use :ocv:func:`cvtColor` to convert between the color spaces.
* **THE EIGENFACES METHOD MAKES THE ASSUMPTION, THAT THE TRAINING AND TEST IMAGES ARE OF EQUAL SIZE.** (caps-lock, because I got so many mails asking for this). You have to make sure your input data has the correct shape, else a meaningful exception is thrown. Use :ocv:func:`resize` to resize the images.
* This model does not support updating.

Model internal data:
++++++++++++++++++++

* ``num_components`` see :ocv:func:`createEigenFaceRecognizer`.
* ``threshold`` see :ocv:func:`createEigenFaceRecognizer`.
* ``eigenvalues`` The eigenvalues for this Principal Component Analysis (ordered descending).
* ``eigenvectors`` The eigenvectors for this Principal Component Analysis (ordered by their eigenvalue).
* ``mean`` The sample mean calculated from the training data.
* ``projections`` The projections of the training data.
* ``labels`` The labels corresponding to the projections.

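Since the model is an :ocv:class:`Algorithm`, the internals listed above can be read back after training through the generic getters; a short sketch (assuming the parameters are exposed under exactly the names in the list above):

.. code-block:: cpp

    // Train first, then read the PCA internals back out of the model:
    model->train(images, labels);
    Mat eigenvalues  = model->getMat("eigenvalues");
    Mat eigenvectors = model->getMat("eigenvectors");
    Mat mean         = model->getMat("mean");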

createFisherFaceRecognizer
--------------------------

.. ocv:function:: Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX)

    :param num_components: The number of components (read: Fisherfaces) kept for this Linear Discriminant Analysis with the Fisherfaces criterion. It's useful to keep all components, that means the number of your classes ``c`` (read: subjects, persons you want to recognize). If you leave this at the default (``0``), set it to a value less than or equal to ``0``, or set it to a value greater than ``(c-1)``, it will be set to the correct number ``(c-1)`` automatically.

    :param threshold: The threshold applied in the prediction. If the distance to the nearest neighbor is larger than the threshold, this method returns -1.

Notes:
++++++

* Training and prediction must be done on grayscale images, use :ocv:func:`cvtColor` to convert between the color spaces.
* **THE FISHERFACES METHOD MAKES THE ASSUMPTION, THAT THE TRAINING AND TEST IMAGES ARE OF EQUAL SIZE.** (caps-lock, because I got so many mails asking for this). You have to make sure your input data has the correct shape, else a meaningful exception is thrown. Use :ocv:func:`resize` to resize the images.
* This model does not support updating.

Model internal data:
++++++++++++++++++++

* ``num_components`` see :ocv:func:`createFisherFaceRecognizer`.
* ``threshold`` see :ocv:func:`createFisherFaceRecognizer`.
* ``eigenvalues`` The eigenvalues for this Linear Discriminant Analysis (ordered descending).
* ``eigenvectors`` The eigenvectors for this Linear Discriminant Analysis (ordered by their eigenvalue).
* ``mean`` The sample mean calculated from the training data.
* ``projections`` The projections of the training data.
* ``labels`` The labels corresponding to the projections.

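A typical instantiation keeps all ``(c-1)`` Fisherfaces and adds a decision threshold (the value ``250.0`` is made up for illustration; tune it on your own data):

.. code-block:: cpp

    // 0 means: keep all (c-1) Fisherfaces; predictions whose distance to the
    // nearest neighbor exceeds 250.0 are rejected with the label -1:
    Ptr<FaceRecognizer> model = createFisherFaceRecognizer(0, 250.0);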

createLBPHFaceRecognizer
-------------------------

.. ocv:function:: Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8, int grid_x=8, int grid_y=8, double threshold = DBL_MAX)

    :param radius: The radius used for building the Circular Local Binary Pattern. The greater the radius, the smoother the image, but the more spatial information you can get.
    :param neighbors: The number of sample points to build a Circular Local Binary Pattern from. An appropriate value is to use ``8`` sample points. Keep in mind: the more sample points you include, the higher the computational cost.
    :param grid_x: The number of cells in the horizontal direction, ``8`` is a common value used in publications. The more cells, the finer the grid, the higher the dimensionality of the resulting feature vector.
    :param grid_y: The number of cells in the vertical direction, ``8`` is a common value used in publications. The more cells, the finer the grid, the higher the dimensionality of the resulting feature vector.
    :param threshold: The threshold applied in the prediction. If the distance to the nearest neighbor is larger than the threshold, this method returns -1.

Notes:
++++++

* The Circular Local Binary Patterns (used in training and prediction) expect the data given as grayscale images, use :ocv:func:`cvtColor` to convert between the color spaces.
* This model supports updating.

Model internal data:
++++++++++++++++++++

* ``radius`` see :ocv:func:`createLBPHFaceRecognizer`.
* ``neighbors`` see :ocv:func:`createLBPHFaceRecognizer`.
* ``grid_x`` see :ocv:func:`createLBPHFaceRecognizer`.
* ``grid_y`` see :ocv:func:`createLBPHFaceRecognizer`.
* ``threshold`` see :ocv:func:`createLBPHFaceRecognizer`.
* ``histograms`` Local Binary Patterns Histograms calculated from the given training data (empty if none was given).
* ``labels`` Labels corresponding to the calculated Local Binary Patterns Histograms.

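And a non-default instantiation, spelling out all parameters (the values are chosen only for illustration):

.. code-block:: cpp

    // 2-pixel radius, 8 sample points, a coarser 4x4 grid and a decision
    // threshold of 100.0; predictions farther than that return the label -1:
    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer(2, 8, 4, 4, 100.0);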

@ -1,86 +0,0 @@
Changelog
=========

Release 0.05
------------

This library is now included in the official OpenCV distribution (from 2.4 on).
The :ocv:class:`FaceRecognizer` is now an :ocv:class:`Algorithm`, which better fits into the overall
OpenCV API.

To reduce the confusion on the user side and minimize my work, libfacerec and OpenCV
have been synchronized and are now based on the same interfaces and implementation.

The library now has extensive documentation:

* The API is explained in detail and with a lot of code examples.
* The face recognition guide I had written for Python and GNU Octave/MATLAB has been adapted to the new OpenCV C++ ``cv::FaceRecognizer``.
* A tutorial for gender classification with Fisherfaces.
* A tutorial for face recognition in videos (e.g. from a webcam).


Release highlights
++++++++++++++++++

* There are no single highlights to pick from, this release is a highlight itself.

Release 0.04
------------

This version is fully Windows-compatible and works with OpenCV 2.3.1. Several
bugfixes, but none influenced the recognition rate.

Release highlights
++++++++++++++++++

* A whole lot of exceptions with meaningful error messages.
* A tutorial for Windows users: `http://bytefish.de/blog/opencv_visual_studio_and_libfacerec <http://bytefish.de/blog/opencv_visual_studio_and_libfacerec>`_


Release 0.03
------------

Reworked the library to provide separate implementations in cpp files, because
it's the preferred way of contributing OpenCV libraries. This means the library
is not header-only anymore. Slight API changes were done, please see the
documentation for details.

Release highlights
++++++++++++++++++

* New Unit Tests (for LBP Histograms) make the library more robust.
* Added more documentation.


Release 0.02
------------

Reworked the library to provide separate implementations in cpp files, because
it's the preferred way of contributing OpenCV libraries. This means the library
is not header-only anymore. Slight API changes were done, please see the
documentation for details.

Release highlights
++++++++++++++++++

* New Unit Tests (for LBP Histograms) make the library more robust.
* Added a documentation and changelog in reStructuredText.

Release 0.01
------------

Initial release as header-only library.

Release highlights
++++++++++++++++++

* Colormaps for OpenCV to enhance the visualization.
* Face Recognition algorithms implemented:

  * Eigenfaces [TP91]_
  * Fisherfaces [BHK97]_
  * Local Binary Patterns Histograms [AHP04]_

* Added persistence facilities to store the models with a common API.
* Unit Tests (using `gtest <http://code.google.com/p/googletest/>`_).
* Providing a CMakeLists.txt to enable easy cross-platform building.
@ -1,628 +0,0 @@
|
||||
Face Recognition with OpenCV
|
||||
############################
|
||||
|
||||
.. contents:: Table of Contents
|
||||
:depth: 3
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
`OpenCV (Open Source Computer Vision) <http://opencv.org>`_ is a popular computer vision library started by `Intel <http://www.intel.com>`_ in 1999. The cross-platform library sets its focus on real-time image processing and includes patent-free implementations of the latest computer vision algorithms. In 2008 `Willow Garage <http://www.willowgarage.com>`_ took over support and OpenCV 2.3.1 now comes with a programming interface to C, C++, `Python <http://www.python.org>`_ and `Android <http://www.android.com>`_. OpenCV is released under a BSD license so it is used in academic projects and commercial products alike.
|
||||
|
||||
OpenCV 2.4 now comes with the very new :ocv:class:`FaceRecognizer` class for face recognition, so you can start experimenting with face recognition right away. This document is the guide I've wished for, when I was working myself into face recognition. It shows you how to perform face recognition with :ocv:class:`FaceRecognizer` in OpenCV (with full source code listings) and gives you an introduction into the algorithms behind. I'll also show how to create the visualizations you can find in many publications, because a lot of people asked for.
|
||||
|
||||
The currently available algorithms are:
|
||||
|
||||
* Eigenfaces (see :ocv:func:`createEigenFaceRecognizer`)
|
||||
* Fisherfaces (see :ocv:func:`createFisherFaceRecognizer`)
|
||||
* Local Binary Patterns Histograms (see :ocv:func:`createLBPHFaceRecognizer`)
|
||||
|
||||
You don't need to copy and paste the source code examples from this page, because they are available in the ``src`` folder coming with this documentation. If you have built OpenCV with the samples turned on, chances are good you have them compiled already! Although it might be interesting for very advanced users, I've decided to leave the implementation details out as I am afraid they confuse new users.
|
||||
|
||||
All code in this document is released under the `BSD license <http://www.opensource.org/licenses/bsd-license>`_, so feel free to use it for your projects.
|
||||
|
||||
Face Recognition
|
||||
================
|
||||
|
||||
Face recognition is an easy task for humans. Experiments in [Tu06]_ have shown that even one- to three-day-old babies are able to distinguish between known faces. So how hard could it be for a computer? It turns out we know little about human recognition to date. Are inner features (eyes, nose, mouth) or outer features (head shape, hairline) used for successful face recognition? How do we analyze an image and how does the brain encode it? It was shown by `David Hubel <http://en.wikipedia.org/wiki/David_H._Hubel>`_ and `Torsten Wiesel <http://en.wikipedia.org/wiki/Torsten_Wiesel>`_ that our brain has specialized nerve cells responding to specific local features of a scene, such as lines, edges, angles or movement. Since we don't see the world as scattered pieces, our visual cortex must somehow combine the different sources of information into useful patterns. Automatic face recognition is all about extracting those meaningful features from an image, putting them into a useful representation and performing some kind of classification on them.
|
||||
|
||||
Face recognition based on the geometric features of a face is probably the most intuitive approach to face recognition. One of the first automated face recognition systems was described in [Kanade73]_: marker points (position of eyes, ears, nose, ...) were used to build a feature vector (distance between the points, angle between them, ...). The recognition was performed by calculating the Euclidean distance between the feature vectors of a probe and a reference image. Such a method is robust against changes in illumination by its nature, but has a huge drawback: the accurate registration of the marker points is complicated, even with state-of-the-art algorithms. Some of the latest work on geometric face recognition was carried out in [Bru92]_. A 22-dimensional feature vector was used, and experiments on large datasets have shown that geometrical features alone may not carry enough information for face recognition.
|
||||
|
||||
The Eigenfaces method described in [TP91]_ took a holistic approach to face recognition: a facial image is a point in a high-dimensional image space, and a lower-dimensional representation is found where classification becomes easy. The lower-dimensional subspace is found with Principal Component Analysis, which identifies the axes with maximum variance. While this kind of transformation is optimal from a reconstruction standpoint, it doesn't take any class labels into account. Imagine a situation where the variance is generated from external sources, such as light. The axes with maximum variance do not necessarily contain any discriminative information at all, hence a classification becomes impossible. So a class-specific projection with a Linear Discriminant Analysis was applied to face recognition in [BHK97]_. The basic idea is to minimize the variance within a class, while maximizing the variance between the classes at the same time.
|
||||
|
||||
Recently, various methods for local feature extraction have emerged. To avoid the high dimensionality of the input data, only local regions of an image are described; the extracted features are (hopefully) more robust against partial occlusion, illumination and small sample size. Algorithms used for local feature extraction are Gabor Wavelets ([Wiskott97]_), the Discrete Cosine Transform ([Messer06]_) and Local Binary Patterns ([AHP04]_). It's still an open research question how to best preserve spatial information when applying a local feature extraction, because spatial information is potentially useful.
|
||||
|
||||
Face Database
|
||||
==============
|
||||
|
||||
Let's get some data to experiment with first. I don't want to do a toy example here. We are doing face recognition, so you'll need some face images! You can either create your own dataset or start with one of the available face databases; `http://face-rec.org/databases/ <http://face-rec.org/databases>`_ gives you an up-to-date overview. Three interesting databases are (parts of the description are quoted from `http://face-rec.org <http://face-rec.org>`_):
|
||||
|
||||
* `AT&T Facedatabase <http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html>`_ The AT&T Facedatabase, sometimes also referred to as *ORL Database of Faces*, contains ten different images of each of 40 distinct subjects. For some subjects, the images were taken at different times, varying the lighting, facial expressions (open / closed eyes, smiling / not smiling) and facial details (glasses / no glasses). All the images were taken against a dark homogeneous background with the subjects in an upright, frontal position (with tolerance for some side movement).
|
||||
|
||||
* `Yale Facedatabase A <http://vision.ucsd.edu/content/yale-face-database>`_, also known as Yalefaces. The AT&T Facedatabase is good for initial tests, but it's a fairly easy database. The Eigenfaces method already has a 97% recognition rate on it, so you won't see any great improvements with other algorithms. The Yale Facedatabase A is a more appropriate dataset for initial experiments, because the recognition problem is harder. The database consists of 15 people (14 male, 1 female), each with 11 grayscale images sized :math:`320 \times 243` pixels. There are changes in the lighting conditions (center light, left light, right light), facial expressions (happy, normal, sad, sleepy, surprised, wink) and glasses (glasses, no glasses).
|
||||
|
||||
The original images are not cropped and aligned. Please look into the :ref:`appendixft` for a Python script that does the job for you.
|
||||
|
||||
* `Extended Yale Facedatabase B <http://vision.ucsd.edu/~leekc/ExtYaleDatabase/ExtYaleB.html>`_ The Extended Yale Facedatabase B contains 2414 images of 38 different people in its cropped version. The focus of this database is on extracting features that are robust to illumination; the images have almost no variation in emotion/occlusion/... . I personally think that this dataset is too large for the experiments I perform in this document, so you are better off using the `AT&T Facedatabase <http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html>`_ for initial testing. A first version of the Yale Facedatabase B was used in [BHK97]_ to see how the Eigenfaces and Fisherfaces methods perform under heavy illumination changes. [Lee05]_ used the same setup to take 16128 images of 28 people. The Extended Yale Facedatabase B is the merge of the two databases.
|
||||
|
||||
Preparing the data
|
||||
-------------------
|
||||
|
||||
Once we have acquired some data, we'll need to read it into our program. In the demo applications I have decided to read the images from a very simple CSV file. Why? Because it's the simplest platform-independent approach I can think of. However, if you know a simpler solution, please ping me about it. Basically, all the CSV file needs to contain are lines composed of a ``filename``, followed by a ``;``, followed by the ``label`` (as an *integer number*), making up a line like this:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
/path/to/image.ext;0
|
||||
|
||||
Let's dissect the line. ``/path/to/image.ext`` is the path to an image, on Windows probably something like ``C:/faces/person0/image0.jpg``. Then there is the separator ``;``, and finally we assign the label ``0`` to the image. Think of the label as the subject (the person) this image belongs to, so the same subjects (persons) should have the same label.
|
||||
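If you ever want to parse such a line yourself, it boils down to two ``std::getline`` calls. A minimal sketch (the path and label here are made-up values; the demos use an equivalent ``read_csv`` helper):

.. code-block:: cpp

    #include <cstdlib>
    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
        std::string line = "/path/to/image.ext;0", path, classlabel;
        std::istringstream liness(line);
        std::getline(liness, path, ';');  // everything before the separator
        std::getline(liness, classlabel); // everything after it
        int label = std::atoi(classlabel.c_str());
        std::cout << path << " -> " << label << std::endl;
        return 0;
    }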
|
||||
Download the AT&T Facedatabase (linked above) and the corresponding CSV file ``at.txt`` (included as ``etc/at.txt``, listed in the appendix), which looks like this (the real file doesn't contain the literal ``...`` lines, of course):
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
./at/s1/1.pgm;0
|
||||
./at/s1/2.pgm;0
|
||||
...
|
||||
./at/s2/1.pgm;1
|
||||
./at/s2/2.pgm;1
|
||||
...
|
||||
./at/s40/1.pgm;39
|
||||
./at/s40/2.pgm;39
|
||||
|
||||
Imagine I have extracted the files to ``D:/data/at`` and have downloaded the CSV file to ``D:/data/at.txt``. Then you would simply need to Search & Replace ``./`` with ``D:/data/``. You can do that in an editor of your choice; every sufficiently advanced editor can do this. Once you have a CSV file with valid filenames and labels, you can run any of the demos by passing the path to the CSV file as a parameter:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
facerec_demo.exe D:/data/at.txt
|
||||
|
||||
Creating the CSV File
|
||||
+++++++++++++++++++++
|
||||
|
||||
You don't really want to create the CSV file by hand. I have prepared a little Python script ``create_csv.py`` (you'll find it at ``src/create_csv.py``, coming with this tutorial) that automatically creates a CSV file for you. If you have your images in a hierarchy like this (``/basepath/<subject>/<image.ext>``):
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
philipp@mango:~/facerec/data/at$ tree
|
||||
.
|
||||
|-- s1
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
|-- s2
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
...
|
||||
|-- s40
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
|
||||
|
||||
Then simply call ``create_csv.py`` with the path to the folder, just like this (you could also redirect the output to a file):
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
philipp@mango:~/facerec/data$ python create_csv.py
|
||||
at/s13/2.pgm;0
|
||||
at/s13/7.pgm;0
|
||||
at/s13/6.pgm;0
|
||||
at/s13/9.pgm;0
|
||||
at/s13/5.pgm;0
|
||||
at/s13/3.pgm;0
|
||||
at/s13/4.pgm;0
|
||||
at/s13/10.pgm;0
|
||||
at/s13/8.pgm;0
|
||||
at/s13/1.pgm;0
|
||||
at/s17/2.pgm;1
|
||||
at/s17/7.pgm;1
|
||||
at/s17/6.pgm;1
|
||||
at/s17/9.pgm;1
|
||||
at/s17/5.pgm;1
|
||||
at/s17/3.pgm;1
|
||||
[...]
|
||||
|
||||
Please see the :ref:`appendixft` for additional information.
|
||||
|
||||
Eigenfaces
|
||||
==========
|
||||
|
||||
The problem with the image representation we are given is its high dimensionality. Two-dimensional :math:`p \times q` grayscale images span a :math:`m = pq`-dimensional vector space, so an image with :math:`100 \times 100` pixels lies in a :math:`10,000`-dimensional image space already. The question is: are all dimensions equally useful for us? We can only make a decision if there's any variance in the data, so what we are looking for are the components that account for most of the information. The Principal Component Analysis (PCA) was independently proposed by `Karl Pearson <http://en.wikipedia.org/wiki/Karl_Pearson>`_ (1901) and `Harold Hotelling <http://en.wikipedia.org/wiki/Harold_Hotelling>`_ (1933) to turn a set of possibly correlated variables into a smaller set of uncorrelated variables. The idea is that a high-dimensional dataset is often described by correlated variables, and therefore only a few meaningful dimensions account for most of the information. The PCA method finds the directions with the greatest variance in the data, called principal components.
|
||||
|
||||
Algorithmic Description
|
||||
-----------------------
|
||||
|
||||
Let :math:`X = \{ x_{1}, x_{2}, \ldots, x_{n} \}` be a random vector with observations :math:`x_i \in R^{d}`.
|
||||
|
||||
1. Compute the mean :math:`\mu`
|
||||
|
||||
.. math::
|
||||
|
||||
\mu = \frac{1}{n} \sum_{i=1}^{n} x_{i}
|
||||
|
||||
2. Compute the covariance matrix :math:`S`
|
||||
|
||||
.. math::
|
||||
|
||||
S = \frac{1}{n} \sum_{i=1}^{n} (x_{i} - \mu) (x_{i} - \mu)^{T}
|
||||
|
||||
3. Compute the eigenvalues :math:`\lambda_{i}` and eigenvectors :math:`v_{i}` of :math:`S`
|
||||
|
||||
.. math::
|
||||
|
||||
S v_{i} = \lambda_{i} v_{i}, i=1,2,\ldots,n
|
||||
|
||||
4. Order the eigenvectors in descending order of their eigenvalues. The :math:`k` principal components are the eigenvectors corresponding to the :math:`k` largest eigenvalues.
|
||||
|
||||
The :math:`k` principal components of the observed vector :math:`x` are then given by:
|
||||
|
||||
.. math::
|
||||
|
||||
y = W^{T} (x - \mu)
|
||||
|
||||
|
||||
where :math:`W = (v_{1}, v_{2}, \ldots, v_{k})`.
|
||||
|
||||
The reconstruction from the PCA basis is given by:
|
||||
|
||||
.. math::
|
||||
|
||||
x = W y + \mu
|
||||
|
||||
where :math:`W = (v_{1}, v_{2}, \ldots, v_{k})`.
|
||||
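You don't have to implement these steps yourself in OpenCV; :ocv:class:`PCA` does the work. A minimal sketch, assuming ``data`` holds one flattened ``CV_64F`` image per row:

.. code-block:: cpp

    #include "opencv2/core.hpp"

    // Project "sample" onto the first k principal components of "data" and
    // reconstruct it again: y = W^T (x - mu), then x ~ W y + mu.
    cv::Mat project_and_reconstruct(const cv::Mat& data, const cv::Mat& sample, int k)
    {
        // On OpenCV 2.4 the flag is spelled CV_PCA_DATA_AS_ROW instead:
        cv::PCA pca(data, cv::Mat(), cv::PCA::DATA_AS_ROW, k);
        cv::Mat y = pca.project(sample);
        return pca.backProject(y);
    }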
|
||||
|
||||
The Eigenfaces method then performs face recognition by:
|
||||
|
||||
* Projecting all training samples into the PCA subspace.
|
||||
* Projecting the query image into the PCA subspace.
|
||||
* Finding the nearest neighbor between the projected training images and the projected query image (a minimal sketch of this search follows below).
|
||||
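The nearest-neighbor search in the last step is a plain Euclidean distance over the projections. A minimal sketch (this mirrors what the library does internally, but is not the library code itself):

.. code-block:: cpp

    #include <cfloat>
    #include <vector>
    #include "opencv2/core.hpp"

    // "projections" holds the projected training samples, "query" the
    // projected test image. Returns the label of the closest sample.
    int nearest_neighbor(const std::vector<cv::Mat>& projections,
                         const std::vector<int>& labels, const cv::Mat& query)
    {
        double minDist = DBL_MAX;
        int minClass = -1;
        for (size_t i = 0; i < projections.size(); i++) {
            double dist = cv::norm(projections[i], query, cv::NORM_L2);
            if (dist < minDist) {
                minDist = dist;
                minClass = labels[i];
            }
        }
        return minClass;
    }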
|
||||
Still there's one problem left to solve. Imagine we are given :math:`400` images sized :math:`100 \times 100` pixels. The Principal Component Analysis requires an eigenvalue decomposition of the covariance matrix :math:`S = X X^{T}`, where :math:`{size}(X) = 10000 \times 400` in our example. You would end up with a :math:`10000 \times 10000` matrix, roughly :math:`0.8 GB`. Solving this problem directly isn't feasible, so we'll need to apply a trick. From your linear algebra lessons you know that a :math:`M \times N` matrix with :math:`M > N` can only have :math:`N - 1` non-zero eigenvalues. So it's possible to take the eigenvalue decomposition of :math:`X^{T} X`, which is only of size :math:`N \times N`, instead:
|
||||
|
||||
.. math::
|
||||
|
||||
X^{T} X v_{i} = \lambda_{i} v_{i}
|
||||
|
||||
|
||||
and get the original eigenvectors of :math:`S = X X^{T}` with a left multiplication of the data matrix:
|
||||
|
||||
.. math::
|
||||
|
||||
X X^{T} (X v_{i}) = \lambda_{i} (X v_{i})
|
||||
|
||||
The resulting eigenvectors are orthogonal; to get orthonormal eigenvectors, they need to be normalized to unit length. I don't want to turn this into a publication, so please look into [Duda01]_ for the derivation and proof of the equations.
|
||||
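In OpenCV terms the trick is only a few lines. A hedged sketch, assuming ``X`` is the mean-centered :math:`d \times N` data matrix as ``CV_64F``:

.. code-block:: cpp

    #include "opencv2/core.hpp"

    // Decompose the small N x N matrix X^T X and lift its eigenvectors to
    // eigenvectors of the huge d x d matrix X X^T by left-multiplying with X.
    cv::Mat eigenvectors_via_trick(const cv::Mat& X)
    {
        cv::Mat evals, evecs;
        cv::eigen(X.t() * X, evals, evecs); // rows of evecs are eigenvectors
        cv::Mat lifted = X * evecs.t();     // columns are eigenvectors of X X^T
        for (int i = 0; i < lifted.cols; i++)
            cv::normalize(lifted.col(i), lifted.col(i)); // unit length
        return lifted;
    }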
|
||||
Eigenfaces in OpenCV
|
||||
--------------------
|
||||
|
||||
I'll go through the first source code example with you. I first give you the whole source code listing, and after that we'll look at the most important lines in detail. Please note: every source code listing is commented in detail, so you should have no problems following it.
|
||||
|
||||
.. literalinclude:: src/facerec_eigenfaces.cpp
|
||||
:language: cpp
|
||||
:linenos:
|
||||
|
||||
The source code for this demo application is also available in the ``src`` folder coming with this documentation:
|
||||
|
||||
* :download:`src/facerec_eigenfaces.cpp <src/facerec_eigenfaces.cpp>`
|
||||
|
||||
|
||||
I've used the jet colormap, so you can see how the grayscale values are distributed within the specific Eigenfaces. You can see that the Eigenfaces not only encode facial features, but also the illumination in the images (see the left light in Eigenface \#4, the right light in Eigenface \#5):
|
||||
|
||||
.. image:: img/eigenfaces_opencv.png
|
||||
:align: center
|
||||
|
||||
We've already seen that we can reconstruct a face from its lower-dimensional approximation. So let's see how many Eigenfaces are needed for a good reconstruction. The following loop reconstructs a sample with an increasing number of Eigenfaces (from 10 up to 300, in steps of 15):
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
// Display or save the image reconstruction at some predefined steps:
|
||||
for(int num_components = 10; num_components < 300; num_components+=15) {
|
||||
// slice the eigenvectors from the model
|
||||
Mat evs = Mat(W, Range::all(), Range(0, num_components));
|
||||
Mat projection = subspaceProject(evs, mean, images[0].reshape(1,1));
|
||||
Mat reconstruction = subspaceReconstruct(evs, mean, projection);
|
||||
// Normalize the result:
|
||||
reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
|
||||
// Display or save:
|
||||
if(argc == 2) {
|
||||
imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
|
||||
} else {
|
||||
imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
|
||||
}
|
||||
}
|
||||
|
||||
10 Eigenvectors are obviously not sufficient for a good image reconstruction; 50 Eigenvectors may already be sufficient to encode important facial features. You'll get a good reconstruction with approximately 300 Eigenvectors for the AT&T Facedatabase. There are rules of thumb for how many Eigenfaces you should choose for successful face recognition, but it heavily depends on the input data. [Zhao03]_ is the perfect point to start researching this:
|
||||
|
||||
.. image:: img/eigenface_reconstruction_opencv.png
|
||||
:align: center
|
||||
|
||||
|
||||
Fisherfaces
|
||||
============
|
||||
|
||||
The Principal Component Analysis (PCA), which is the core of the Eigenfaces method, finds a linear combination of features that maximizes the total variance in the data. While this is clearly a powerful way to represent data, it doesn't consider any classes, and so a lot of discriminative information *may* be lost when throwing components away. Imagine a situation where the variance in your data is generated by an external source, such as light. The components identified by a PCA do not necessarily contain any discriminative information at all, so the projected samples are smeared together and a classification becomes impossible (see `http://www.bytefish.de/wiki/pca_lda_with_gnu_octave <http://www.bytefish.de/wiki/pca_lda_with_gnu_octave>`_ for an example).
|
||||
|
||||
The Linear Discriminant Analysis performs a class-specific dimensionality reduction and was invented by the great statistician `Sir R. A. Fisher <http://en.wikipedia.org/wiki/Ronald_Fisher>`_. He successfully used it for classifying flowers in his 1936 paper *The use of multiple measurements in taxonomic problems* [Fisher36]_. In order to find the combination of features that best separates the classes, the Linear Discriminant Analysis maximizes the ratio of between-class to within-class scatter, instead of maximizing the overall scatter. The idea is simple: the same classes should cluster tightly together, while different classes are as far away as possible from each other in the lower-dimensional representation. This was also recognized by `Belhumeur <http://www.cs.columbia.edu/~belhumeur/>`_, `Hespanha <http://www.ece.ucsb.edu/~hespanha/>`_ and `Kriegman <http://cseweb.ucsd.edu/~kriegman/>`_, and so they applied a Discriminant Analysis to face recognition in [BHK97]_.
|
||||
|
||||
Algorithmic Description
|
||||
-----------------------
|
||||
|
||||
Let :math:`X` be a random vector with samples drawn from :math:`c` classes:
|
||||
|
||||
|
||||
.. math::
|
||||
:nowrap:
|
||||
|
||||
\begin{align*}
|
||||
X & = & \{X_1,X_2,\ldots,X_c\} \\
|
||||
X_i & = & \{x_1, x_2, \ldots, x_n\}
|
||||
\end{align*}
|
||||
|
||||
|
||||
The scatter matrices :math:`S_{B}` and :math:`S_{W}` are calculated as:
|
||||
|
||||
.. math::
|
||||
:nowrap:
|
||||
|
||||
\begin{align*}
|
||||
S_{B} & = & \sum_{i=1}^{c} N_{i} (\mu_i - \mu)(\mu_i - \mu)^{T} \\
|
||||
S_{W} & = & \sum_{i=1}^{c} \sum_{x_{j} \in X_{i}} (x_j - \mu_i)(x_j - \mu_i)^{T}
|
||||
\end{align*}
|
||||
|
||||
where :math:`\mu` is the total mean:
|
||||
|
||||
.. math::
|
||||
|
||||
\mu = \frac{1}{N} \sum_{i=1}^{N} x_i
|
||||
|
||||
and :math:`\mu_i` is the mean of class :math:`i \in \{1,\ldots,c\}`:
|
||||
|
||||
.. math::
|
||||
|
||||
\mu_i = \frac{1}{|X_i|} \sum_{x_j \in X_i} x_j
|
||||
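To make the formulas concrete, here is a hedged sketch of the between-class scatter in OpenCV code (``means``, ``counts``, ``mu`` and ``d`` are assumptions: per-class means and sizes, the total mean and the data dimension, with all vectors as ``1 x d`` ``CV_64F`` rows); :math:`S_{W}` is accumulated the same way over the samples of each class:

.. code-block:: cpp

    #include <vector>
    #include "opencv2/core.hpp"

    // S_B = sum_i N_i (mu_i - mu)(mu_i - mu)^T
    cv::Mat between_class_scatter(const std::vector<cv::Mat>& means,
                                  const std::vector<int>& counts,
                                  const cv::Mat& mu, int d)
    {
        cv::Mat Sb = cv::Mat::zeros(d, d, CV_64F);
        for (size_t i = 0; i < means.size(); i++) {
            cv::Mat diff = (means[i] - mu).t(); // a d x 1 column vector
            Sb += counts[i] * diff * diff.t();
        }
        return Sb;
    }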
|
||||
Fisher's classic algorithm now looks for a projection :math:`W`, that maximizes the class separability criterion:
|
||||
|
||||
.. math::
|
||||
|
||||
W_{opt} = \operatorname{arg\,max}_{W} \frac{|W^T S_B W|}{|W^T S_W W|}
|
||||
|
||||
|
||||
Following [BHK97]_, a solution for this optimization problem is given by solving the General Eigenvalue Problem:
|
||||
|
||||
.. math::
|
||||
:nowrap:
|
||||
|
||||
\begin{align*}
|
||||
S_{B} v_{i} & = & \lambda_{i} S_{W} v_{i} \nonumber \\
|
||||
S_{W}^{-1} S_{B} v_{i} & = & \lambda_{i} v_{i}
|
||||
\end{align*}
|
||||
|
||||
There's one problem left to solve: the rank of :math:`S_{W}` is at most :math:`(N-c)`, with :math:`N` samples and :math:`c` classes. In pattern recognition problems the number of samples :math:`N` is almost always smaller than the dimension of the input data (the number of pixels), so the scatter matrix :math:`S_{W}` becomes singular (see [RJ91]_). In [BHK97]_ this was solved by performing a Principal Component Analysis on the data and projecting the samples into the :math:`(N-c)`-dimensional space. A Linear Discriminant Analysis was then performed on the reduced data, because :math:`S_{W}` isn't singular anymore.
|
||||
|
||||
The optimization problem can then be rewritten as:
|
||||
|
||||
.. math::
|
||||
:nowrap:
|
||||
|
||||
\begin{align*}
|
||||
W_{pca} & = & \operatorname{arg\,max}_{W} |W^T S_T W| \\
|
||||
W_{fld} & = & \operatorname{arg\,max}_{W} \frac{|W^T W_{pca}^T S_{B} W_{pca} W|}{|W^T W_{pca}^T S_{W} W_{pca} W|}
|
||||
\end{align*}
|
||||
|
||||
The transformation matrix :math:`W`, that projects a sample into the :math:`(c-1)`-dimensional space is then given by:
|
||||
|
||||
.. math::
|
||||
|
||||
W = W_{fld}^{T} W_{pca}^{T}
|
||||
|
||||
Fisherfaces in OpenCV
|
||||
---------------------
|
||||
|
||||
.. literalinclude:: src/facerec_fisherfaces.cpp
|
||||
:language: cpp
|
||||
:linenos:
|
||||
|
||||
The source code for this demo application is also available in the ``src`` folder coming with this documentation:
|
||||
|
||||
* :download:`src/facerec_fisherfaces.cpp <src/facerec_fisherfaces.cpp>`
|
||||
|
||||
|
||||
For this example I am going to use the Yale Facedatabase A, just because the plots are nicer. Each Fisherface has the same length as an original image, so it can be displayed as an image. The demo shows (or saves) at most the first 16 Fisherfaces:
|
||||
|
||||
.. image:: img/fisherfaces_opencv.png
|
||||
:align: center
|
||||
|
||||
The Fisherfaces method learns a class-specific transformation matrix, so they do not capture illumination as obviously as the Eigenfaces method does. The Discriminant Analysis instead finds the facial features that discriminate between the persons. It's important to mention that the performance of the Fisherfaces heavily depends on the input data as well. Practically speaking: if you learn the Fisherfaces for well-illuminated pictures only and then try to recognize faces in badly illuminated scenes, the method is likely to find the wrong components (just because those features may not be predominant in badly illuminated images). This is somewhat logical, since the method had no chance to learn the illumination.
|
||||
|
||||
The Fisherfaces allow a reconstruction of the projected image, just like the Eigenfaces did. But since we only identified the features to distinguish between subjects, you can't expect a nice reconstruction of the original image. For the Fisherfaces method, we'll instead project the sample image onto each of the Fisherfaces. So you'll get a nice visualization of which feature each of the Fisherfaces describes:
|
||||
|
||||
.. code-block:: cpp
|
||||
|
||||
// Display or save the image reconstruction at some predefined steps:
|
||||
for(int num_component = 0; num_component < min(16, W.cols); num_component++) {
|
||||
// Slice the Fisherface from the model:
|
||||
Mat ev = W.col(num_component);
|
||||
Mat projection = subspaceProject(ev, mean, images[0].reshape(1,1));
|
||||
Mat reconstruction = subspaceReconstruct(ev, mean, projection);
|
||||
// Normalize the result:
|
||||
reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
|
||||
// Display or save:
|
||||
if(argc == 2) {
|
||||
imshow(format("fisherface_reconstruction_%d", num_component), reconstruction);
|
||||
} else {
|
||||
imwrite(format("%s/fisherface_reconstruction_%d.png", output_folder.c_str(), num_component), reconstruction);
|
||||
}
|
||||
}
|
||||
|
||||
The differences may be subtle for the human eye, but you should be able to see some:
|
||||
|
||||
.. image:: img/fisherface_reconstruction_opencv.png
|
||||
:align: center
|
||||
|
||||
|
||||
Local Binary Patterns Histograms
|
||||
================================
|
||||
|
||||
Eigenfaces and Fisherfaces take a somewhat holistic approach to face recognition. You treat your data as a vector somewhere in a high-dimensional image space. We all know high-dimensionality is bad, so a lower-dimensional subspace is identified, where (probably) useful information is preserved. The Eigenfaces approach maximizes the total scatter, which can lead to problems if the variance is generated by an external source, because components with a maximum variance over all classes aren't necessarily useful for classification (see `http://www.bytefish.de/wiki/pca_lda_with_gnu_octave <http://www.bytefish.de/wiki/pca_lda_with_gnu_octave>`_). So to preserve some discriminative information we applied a Linear Discriminant Analysis and optimized as described in the Fisherfaces method. The Fisherfaces method worked great... at least for the constrained scenario we've assumed in our model.
|
||||
|
||||
Now real life isn't perfect. You simply can't guarantee perfect lighting in your images or 10 different images of a person. So what if there's only one image for each person? Our covariance estimates for the subspace *may* be horribly wrong, and so will the recognition. Remember that the Eigenfaces method had a 96% recognition rate on the AT&T Facedatabase? How many images do we actually need to get such useful estimates? Here are the Rank-1 recognition rates of the Eigenfaces and Fisherfaces methods on the AT&T Facedatabase, which is a fairly easy image database:
|
||||
|
||||
.. image:: img/at_database_small_sample_size.png
|
||||
:scale: 60%
|
||||
:align: center
|
||||
|
||||
So in order to get good recognition rates you'll need at least 8 (±1) images per person, and the Fisherfaces method doesn't really help here. The above experiment is a 10-fold cross-validated result carried out with the facerec framework at `https://github.com/bytefish/facerec <https://github.com/bytefish/facerec>`_. This is not a publication, so I won't back these figures with a deep mathematical analysis. Please have a look into [KM01]_ for a detailed analysis of both methods when it comes to small training datasets.
|
||||
|
||||
So some research concentrated on extracting local features from images. The idea is to not look at the whole image as a high-dimensional vector, but to describe only local features of an object. The features you extract this way implicitly have a low dimensionality. A fine idea! But you'll soon observe that the image representation we are given doesn't only suffer from illumination variations. Think of things like scale, translation or rotation in images - your local description has to be at least somewhat robust against those things. Just like :ocv:class:`SIFT`, the Local Binary Patterns methodology has its roots in 2D texture analysis. The basic idea of Local Binary Patterns is to summarize the local structure in an image by comparing each pixel with its neighborhood. Take a pixel as center and threshold its neighbors against it: if the intensity of a neighbor is greater than or equal to that of the center pixel, denote it with 1, otherwise with 0. You'll end up with a binary number for each pixel, just like 11001111. So with 8 surrounding pixels you'll end up with :math:`2^8` possible combinations, called *Local Binary Patterns* or sometimes referred to as *LBP codes*. The first LBP operator described in the literature actually used a fixed :math:`3 \times 3` neighborhood, just like this:
|
||||
|
||||
.. image:: img/lbp/lbp.png
|
||||
:scale: 80%
|
||||
:align: center
|
||||
|
||||
Algorithmic Description
|
||||
-----------------------
|
||||
|
||||
A more formal description of the LBP operator can be given as:
|
||||
|
||||
.. math::
|
||||
|
||||
LBP(x_c, y_c) = \sum_{p=0}^{P-1} 2^p s(i_p - i_c)
|
||||
|
||||
where :math:`(x_c, y_c)` is the central pixel with intensity :math:`i_c`, and :math:`i_p` is the intensity of the neighbor pixel. :math:`s` is the sign function defined as:
|
||||
|
||||
.. math::
|
||||
:nowrap:
|
||||
|
||||
\begin{equation}
|
||||
s(x) =
|
||||
\begin{cases}
|
||||
1 & \text{if $x \geq 0$}\\
|
||||
0 & \text{else}
|
||||
\end{cases}
|
||||
\end{equation}
|
||||
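A direct translation of this definition to code, for the fixed 3 x 3 neighborhood on a ``CV_8UC1`` image, could look like the following sketch (the bit order of the neighbors is just a convention; the library's own implementation is more general):

.. code-block:: cpp

    #include "opencv2/core.hpp"

    // Compute the basic 3 x 3 LBP code for every non-border pixel of src.
    cv::Mat lbp(const cv::Mat& src)
    {
        cv::Mat dst = cv::Mat::zeros(src.size(), CV_8UC1);
        for (int y = 1; y < src.rows - 1; y++) {
            for (int x = 1; x < src.cols - 1; x++) {
                uchar c = src.at<uchar>(y, x);
                uchar code = 0;
                code |= (src.at<uchar>(y - 1, x - 1) >= c) << 7;
                code |= (src.at<uchar>(y - 1, x    ) >= c) << 6;
                code |= (src.at<uchar>(y - 1, x + 1) >= c) << 5;
                code |= (src.at<uchar>(y,     x + 1) >= c) << 4;
                code |= (src.at<uchar>(y + 1, x + 1) >= c) << 3;
                code |= (src.at<uchar>(y + 1, x    ) >= c) << 2;
                code |= (src.at<uchar>(y + 1, x - 1) >= c) << 1;
                code |= (src.at<uchar>(y,     x - 1) >= c) << 0;
                dst.at<uchar>(y, x) = code;
            }
        }
        return dst;
    }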
|
||||
This description enables you to capture very fine-grained details in images. In fact, the authors were able to compete with state-of-the-art results for texture classification. Soon after the operator was published, it was noted that a fixed neighborhood fails to encode details differing in scale. So the operator was extended to use a variable neighborhood in [AHP04]_. The idea is to align an arbitrary number of neighbors on a circle with a variable radius, which makes it possible to capture the following neighborhoods:
|
||||
|
||||
.. image:: img/lbp/patterns.png
|
||||
:scale: 80%
|
||||
:align: center
|
||||
|
||||
For a given point :math:`(x_c,y_c)`, the position of the neighbor :math:`(x_p,y_p)`, :math:`p \in \{0,\ldots,P-1\}`, can be calculated by:
|
||||
|
||||
.. math::
|
||||
:nowrap:
|
||||
|
||||
\begin{align*}
|
||||
x_{p} & = & x_c + R \cos({\frac{2\pi p}{P}})\\
|
||||
y_{p} & = & y_c - R \sin({\frac{2\pi p}{P}})
|
||||
\end{align*}
|
||||
|
||||
where :math:`R` is the radius of the circle and :math:`P` is the number of sample points.
|
||||
|
||||
The operator is an extension of the original LBP codes, so it's sometimes called *Extended LBP* (also referred to as *Circular LBP*). If a point's coordinates on the circle don't correspond to image coordinates, the point gets interpolated. Computer science has a bunch of clever interpolation schemes; the OpenCV implementation does a bilinear interpolation:
|
||||
|
||||
.. math::
|
||||
:nowrap:
|
||||
|
||||
\begin{align*}
|
||||
f(x,y) \approx \begin{bmatrix}
|
||||
1-x & x \end{bmatrix} \begin{bmatrix}
|
||||
f(0,0) & f(0,1) \\
|
||||
f(1,0) & f(1,1) \end{bmatrix} \begin{bmatrix}
|
||||
1-y \\
|
||||
y \end{bmatrix}.
|
||||
\end{align*}
|
||||
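A small sketch of sampling one such neighbor (bounds checks omitted; ``src`` is again a ``CV_8UC1`` image):

.. code-block:: cpp

    #include <cmath>
    #include "opencv2/core.hpp"

    // Sample neighbor p of P on a circle of radius R around (xc, yc),
    // bilinearly interpolating between the four surrounding pixels.
    float sample_neighbor(const cv::Mat& src, int xc, int yc, int p, int P, double R)
    {
        double xp = xc + R * cos(2.0 * CV_PI * p / P);
        double yp = yc - R * sin(2.0 * CV_PI * p / P);
        int fx = (int)std::floor(xp), fy = (int)std::floor(yp);
        double tx = xp - fx, ty = yp - fy; // fractional parts
        return (float)((1 - tx) * (1 - ty) * src.at<uchar>(fy,     fx)
                     +      tx  * (1 - ty) * src.at<uchar>(fy,     fx + 1)
                     + (1 - tx) *      ty  * src.at<uchar>(fy + 1, fx)
                     +      tx  *      ty  * src.at<uchar>(fy + 1, fx + 1));
    }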
|
||||
By definition, the LBP operator is robust against monotonic grayscale transformations. We can easily verify this by looking at the LBP image of an artificially modified image (so you see what an LBP image looks like!):
|
||||
|
||||
.. image:: img/lbp/lbp_yale.jpg
|
||||
:scale: 60%
|
||||
:align: center
|
||||
|
||||
So what's left is to incorporate the spatial information into the face recognition model. The representation proposed by Ahonen et al. [AHP04]_ is to divide the LBP image into :math:`m` local regions and extract a histogram from each. The spatially enhanced feature vector is then obtained by concatenating the local histograms (**not merging them**), as sketched below. These histograms are called *Local Binary Patterns Histograms*.
|
||||
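A hedged sketch of that concatenation (``numBins`` would be 256 for the basic 8-neighbor operator; the grid sizes are free parameters):

.. code-block:: cpp

    #include "opencv2/core.hpp"

    // Concatenate per-region histograms of an LBP image into one feature vector.
    cv::Mat spatial_histogram(const cv::Mat& lbpImage, int numBins, int grid_x, int grid_y)
    {
        cv::Mat result;
        int w = lbpImage.cols / grid_x, h = lbpImage.rows / grid_y;
        for (int gy = 0; gy < grid_y; gy++) {
            for (int gx = 0; gx < grid_x; gx++) {
                cv::Mat cell = lbpImage(cv::Rect(gx * w, gy * h, w, h));
                cv::Mat hist = cv::Mat::zeros(1, numBins, CV_32F);
                for (int y = 0; y < cell.rows; y++)
                    for (int x = 0; x < cell.cols; x++)
                        hist.at<float>(0, cell.at<uchar>(y, x))++;
                result.push_back(hist); // append, don't merge
            }
        }
        return result.reshape(1, 1); // one long row vector
    }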
|
||||
Local Binary Patterns Histograms in OpenCV
|
||||
------------------------------------------
|
||||
|
||||
.. literalinclude:: src/facerec_lbph.cpp
|
||||
:language: cpp
|
||||
:linenos:
|
||||
|
||||
The source code for this demo application is also available in the ``src`` folder coming with this documentation:
|
||||
|
||||
* :download:`src/facerec_lbph.cpp <src/facerec_lbph.cpp>`
|
||||
|
||||
Conclusion
|
||||
==========
|
||||
|
||||
You've learned how to use the new :ocv:class:`FaceRecognizer` in real applications. After reading the document you also know how the algorithms work, so now it's time for you to experiment with the available algorithms. Use them, improve them and let the OpenCV community participate!
|
||||
|
||||
Credits
|
||||
=======
|
||||
|
||||
This document wouldn't be possible without the kind permission to use the face images of the *AT&T Database of Faces* and the *Yale Facedatabase A/B*.
|
||||
|
||||
The Database of Faces
|
||||
---------------------
|
||||
|
||||
**Important: when using these images, please give credit to "AT&T Laboratories, Cambridge."**
|
||||
|
||||
The Database of Faces, formerly *The ORL Database of Faces*, contains a set of face images taken between April 1992 and April 1994. The database was used in the context of a face recognition project carried out in collaboration with the Speech, Vision and Robotics Group of the Cambridge University Engineering Department.
|
||||
|
||||
There are ten different images of each of 40 distinct subjects. For some subjects, the images were taken at different times, varying the lighting, facial expressions (open / closed eyes, smiling / not smiling) and facial details (glasses / no glasses). All the images were taken against a dark homogeneous background with the subjects in an upright, frontal position (with tolerance for some side movement).
|
||||
|
||||
The files are in PGM format. The size of each image is 92x112 pixels, with 256 grey levels per pixel. The images are organised in 40 directories (one for each subject), which have names of the form sX, where X indicates the subject number (between 1 and 40). In each of these directories, there are ten different images of that subject, which have names of the form Y.pgm, where Y is the image number for that subject (between 1 and 10).
|
||||
|
||||
A copy of the database can be retrieved from: `http://www.cl.cam.ac.uk/research/dtg/attarchive/pub/data/att_faces.zip <http://www.cl.cam.ac.uk/research/dtg/attarchive/pub/data/att_faces.zip>`_.
|
||||
|
||||
Yale Facedatabase A
|
||||
-------------------
|
||||
|
||||
*With the permission of the authors I am allowed to show a small number of images (say subject 1 and all the variations) and all images such as Fisherfaces and Eigenfaces from either Yale Facedatabase A or the Yale Facedatabase B.*
|
||||
|
||||
The Yale Face Database A (size 6.4MB) contains 165 grayscale images in GIF format of 15 individuals. There are 11 images per subject, one per different facial expression or configuration: center-light, w/glasses, happy, left-light, w/no glasses, normal, right-light, sad, sleepy, surprised, and wink. (Source: `http://cvc.yale.edu/projects/yalefaces/yalefaces.html <http://cvc.yale.edu/projects/yalefaces/yalefaces.html>`_)
|
||||
|
||||
Yale Facedatabase B
|
||||
--------------------
|
||||
|
||||
*With the permission of the authors I am allowed to show a small number of images (say subject 1 and all the variations) and all images such as Fisherfaces and Eigenfaces from either Yale Facedatabase A or the Yale Facedatabase B.*
|
||||
|
||||
The extended Yale Face Database B contains 16128 images of 28 human subjects under 9 poses and 64 illumination conditions. The data format of this database is the same as the Yale Face Database B. Please refer to the homepage of the Yale Face Database B (or one copy of this page) for more detailed information of the data format.
|
||||
|
||||
You are free to use the extended Yale Face Database B for research purposes. All publications which use this database should acknowledge the use of "the Extended Yale Face Database B" and reference Athinodoros Georghiades, Peter Belhumeur, and David Kriegman's paper, "From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose", PAMI, 2001, `[bibtex] <http://vision.ucsd.edu/~leekc/ExtYaleDatabase/athosref.html>`_.
|
||||
|
||||
The extended database, as opposed to the original Yale Face Database B with 10 subjects, was first reported by Kuang-Chih Lee, Jeffrey Ho, and David Kriegman in "Acquiring Linear Subspaces for Face Recognition under Variable Lighting," PAMI, May 2005 `[pdf] <http://vision.ucsd.edu/~leekc/papers/9pltsIEEE.pdf>`_. All test image data used in the experiments are manually aligned, cropped, and then re-sized to 168x192 images. If you publish your experimental results with the cropped images, please reference the PAMI 2005 paper as well. (Source: `http://vision.ucsd.edu/~leekc/ExtYaleDatabase/ExtYaleB.html <http://vision.ucsd.edu/~leekc/ExtYaleDatabase/ExtYaleB.html>`_)
|
||||
|
||||
Literature
|
||||
==========
|
||||
|
||||
.. [AHP04] Ahonen, T., Hadid, A., and Pietikainen, M. *Face Recognition with Local Binary Patterns.* Computer Vision - ECCV 2004 (2004), 469–481.
|
||||
|
||||
.. [BHK97] Belhumeur, P. N., Hespanha, J., and Kriegman, D. *Eigenfaces vs. Fisherfaces: Recognition Using Class Specific Linear Projection.* IEEE Transactions on Pattern Analysis and Machine Intelligence 19, 7 (1997), 711–720.
|
||||
|
||||
.. [Bru92] Brunelli, R., and Poggio, T. *Face Recognition through Geometrical Features.* European Conference on Computer Vision (ECCV) 1992, pp. 792–800.
|
||||
|
||||
.. [Duda01] Duda, R. O., Hart, P. E., and Stork, D. G. *Pattern Classification* (2nd Edition), 2001.
|
||||
|
||||
.. [Fisher36] Fisher, R. A. *The use of multiple measurements in taxonomic problems.* Annals of Eugenics 7 (1936), 179–188.
|
||||
|
||||
.. [GBK01] Georghiades, A. S., Belhumeur, P. N., and Kriegman, D. J. *From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose.* IEEE Transactions on Pattern Analysis and Machine Intelligence 23, 6 (2001), 643–660.
|
||||
|
||||
.. [Kanade73] Kanade, T. *Picture processing system by computer complex and recognition of human faces.* PhD thesis, Kyoto University, November 1973.
|
||||
|
||||
.. [KM01] Martinez, A., and Kak, A. *PCA versus LDA.* IEEE Transactions on Pattern Analysis and Machine Intelligence 23, 2 (2001), 228–233.
|
||||
|
||||
.. [Lee05] Lee, K., Ho, J., and Kriegman, D. *Acquiring Linear Subspaces for Face Recognition under Variable Lighting.* IEEE Transactions on Pattern Analysis and Machine Intelligence 27, 5 (2005).
|
||||
|
||||
.. [Messer06] Messer, K., et al. *Performance Characterisation of Face Recognition Algorithms and Their Sensitivity to Severe Illumination Changes.* In: ICB, 2006, pp. 1–11.
|
||||
|
||||
.. [RJ91] Raudys, S., and Jain, A. K. *Small sample size effects in statistical pattern recognition: Recommendations for practitioners.* IEEE Transactions on Pattern Analysis and Machine Intelligence 13, 3 (1991), 252–264.
|
||||
|
||||
.. [Tan10] Tan, X., and Triggs, B. *Enhanced local texture feature sets for face recognition under difficult lighting conditions.* IEEE Transactions on Image Processing 19 (2010), 1635–1650.
|
||||
|
||||
.. [TP91] Turk, M., and Pentland, A. *Eigenfaces for recognition.* Journal of Cognitive Neuroscience 3 (1991), 71–86.
|
||||
|
||||
.. [Tu06] Turati, C., Macchi Cassia, V., Simion, F., and Leo, I. *Newborns' face recognition: Role of inner and outer facial features.* Child Development 77, 2 (2006), 297–311.
|
||||
|
||||
.. [Wiskott97] Wiskott, L., Fellous, J., Krüger, N., and von der Malsburg, C. *Face Recognition By Elastic Bunch Graph Matching.* IEEE Transactions on Pattern Analysis and Machine Intelligence 19 (1997), 775–779.
|
||||
|
||||
.. [Zhao03] Zhao, W., Chellappa, R., Phillips, P., and Rosenfeld, A. *Face recognition: A literature survey.* ACM Computing Surveys (CSUR) 35, 4 (2003), 399–458.
|
||||
|
||||
.. _appendixft:
|
||||
|
||||
Appendix
|
||||
========
|
||||
|
||||
Creating the CSV File
|
||||
---------------------
|
||||
|
||||
You don't really want to create the CSV file by hand. I have prepared a little Python script ``create_csv.py`` (you'll find it at ``src/create_csv.py``, coming with this tutorial) that automatically creates a CSV file for you. If you have your images in a hierarchy like this (``/basepath/<subject>/<image.ext>``):
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
philipp@mango:~/facerec/data/at$ tree
|
||||
.
|
||||
|-- s1
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
|-- s2
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
...
|
||||
|-- s40
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
|
||||
|
||||
Then simply call ``create_csv.py`` with the path to the folder, just like this (you could also redirect the output to a file):
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
philipp@mango:~/facerec/data$ python create_csv.py
|
||||
at/s13/2.pgm;0
|
||||
at/s13/7.pgm;0
|
||||
at/s13/6.pgm;0
|
||||
at/s13/9.pgm;0
|
||||
at/s13/5.pgm;0
|
||||
at/s13/3.pgm;0
|
||||
at/s13/4.pgm;0
|
||||
at/s13/10.pgm;0
|
||||
at/s13/8.pgm;0
|
||||
at/s13/1.pgm;0
|
||||
at/s17/2.pgm;1
|
||||
at/s17/7.pgm;1
|
||||
at/s17/6.pgm;1
|
||||
at/s17/9.pgm;1
|
||||
at/s17/5.pgm;1
|
||||
at/s17/3.pgm;1
|
||||
[...]
|
||||
|
||||
Here is the script, if you can't find it:
|
||||
|
||||
.. literalinclude:: ./src/create_csv.py
|
||||
:language: python
|
||||
:linenos:
|
||||
|
||||
Aligning Face Images
|
||||
---------------------
|
||||
|
||||
An accurate alignment of your image data is especially important in tasks like emotion detection, where you need as much detail as possible. Believe me... you don't want to do this by hand. So I've prepared a tiny Python script for you. The code is really easy to use. To scale, rotate and crop the face image, you just need to call *CropFace(image, eye_left, eye_right, offset_pct, dest_sz)*, where:
|
||||
|
||||
* *eye_left* is the position of the left eye
|
||||
* *eye_right* is the position of the right eye
|
||||
* *offset_pct* is the percent of the image you want to keep next to the eyes (horizontal, vertical direction)
|
||||
* *dest_sz* is the size of the output image
|
||||
|
||||
If you use the same *offset_pct* and *dest_sz* for all of your images, they will all be aligned at the eyes.
|
||||
|
||||
.. literalinclude:: ./src/crop_face.py
|
||||
:language: python
|
||||
:linenos:
|
||||
|
||||
Imagine we are given `this photo of Arnold Schwarzenegger <http://en.wikipedia.org/wiki/File:Arnold_Schwarzenegger_edit%28ws%29.jpg>`_, which is under a Public Domain license. The (x,y)-position of the eyes is approximately *(252,364)* for the left eye and *(420,366)* for the right eye. Now you only need to define the horizontal offset, the vertical offset and the size your scaled, rotated & cropped face should have.
|
||||
|
||||
Here are some examples:
|
||||
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
| Configuration | Cropped, Scaled, Rotated Face |
|
||||
+=================================+============================================================================+
|
||||
| 0.1 (10%), 0.1 (10%), (200,200) | .. image:: ./img/tutorial/gender_classification/arnie_10_10_200_200.jpg |
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
| 0.2 (20%), 0.2 (20%), (200,200) | .. image:: ./img/tutorial/gender_classification/arnie_20_20_200_200.jpg |
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
| 0.3 (30%), 0.3 (30%), (200,200) | .. image:: ./img/tutorial/gender_classification/arnie_30_30_200_200.jpg |
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
| 0.2 (20%), 0.2 (20%), (70,70) | .. image:: ./img/tutorial/gender_classification/arnie_20_20_70_70.jpg |
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
|
||||
CSV for the AT&T Facedatabase
|
||||
------------------------------
|
||||
|
||||
.. literalinclude:: etc/at.txt
|
||||
:language: none
|
||||
:linenos:
|
@ -1,32 +0,0 @@
|
||||
FaceRecognizer - Face Recognition with OpenCV
|
||||
##############################################
|
||||
|
||||
OpenCV 2.4 now comes with the very new :ocv:class:`FaceRecognizer` class for face recognition. This documentation explains :doc:`the API <facerec_api>` in detail and gives you a lot of help to get started (with full source code examples). :doc:`Face Recognition with OpenCV <facerec_tutorial>` is the definitive guide to the new :ocv:class:`FaceRecognizer`. There's also a :doc:`tutorial on gender classification <tutorial/facerec_gender_classification>`, a :doc:`tutorial on face recognition in videos <tutorial/facerec_video_recognition>`, and it's shown :doc:`how to load & save your results <tutorial/facerec_save_load>`.
|
||||
|
||||
These documents are the help I wished I had when I was working my way into face recognition. I hope you also think the new :ocv:class:`FaceRecognizer` is a useful addition to OpenCV.
|
||||
|
||||
Please issue any feature requests and/or bugs on the official OpenCV bug tracker at:
|
||||
|
||||
* http://code.opencv.org/projects/opencv/issues
|
||||
|
||||
Contents
|
||||
========
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
FaceRecognizer API <facerec_api>
|
||||
Guide to Face Recognition with OpenCV <facerec_tutorial>
|
||||
Tutorial on Gender Classification <tutorial/facerec_gender_classification>
|
||||
Tutorial on Face Recognition in Videos <tutorial/facerec_video_recognition>
|
||||
Tutorial On Saving & Loading a FaceRecognizer <tutorial/facerec_save_load>
|
||||
How to use Colormaps in OpenCV <colormaps>
|
||||
Changelog <facerec_changelog>
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
@ -1,25 +0,0 @@
|
||||
CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
|
||||
|
||||
set(name "facerec")
|
||||
project(facerec_cpp_samples)
|
||||
|
||||
#SET(OpenCV_DIR /path/to/your/opencv/installation)
|
||||
|
||||
# packages
|
||||
find_package(OpenCV REQUIRED) # http://opencv.org
|
||||
|
||||
# probably you should loop through the sample files here
|
||||
add_executable(facerec_demo facerec_demo.cpp)
|
||||
target_link_libraries(facerec_demo opencv_core opencv_contrib opencv_imgproc opencv_highgui)
|
||||
|
||||
add_executable(facerec_video facerec_video.cpp)
|
||||
target_link_libraries(facerec_video opencv_contrib opencv_core opencv_imgproc opencv_highgui opencv_objdetect)
|
||||
|
||||
add_executable(facerec_eigenfaces facerec_eigenfaces.cpp)
|
||||
target_link_libraries(facerec_eigenfaces opencv_contrib opencv_core opencv_imgproc opencv_highgui)
|
||||
|
||||
add_executable(facerec_fisherfaces facerec_fisherfaces.cpp)
|
||||
target_link_libraries(facerec_fisherfaces opencv_contrib opencv_core opencv_imgproc opencv_highgui)
|
||||
|
||||
add_executable(facerec_lbph facerec_lbph.cpp)
|
||||
target_link_libraries(facerec_lbph opencv_contrib opencv_core opencv_imgproc opencv_highgui)
|
@ -1,43 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import sys
|
||||
import os.path
|
||||
|
||||
# This is a tiny script to help you creating a CSV file from a face
|
||||
# database with a similar hierarchy:
|
||||
#
|
||||
# philipp@mango:~/facerec/data/at$ tree
|
||||
# .
|
||||
# |-- README
|
||||
# |-- s1
|
||||
# | |-- 1.pgm
|
||||
# | |-- ...
|
||||
# | |-- 10.pgm
|
||||
# |-- s2
|
||||
# | |-- 1.pgm
|
||||
# | |-- ...
|
||||
# | |-- 10.pgm
|
||||
# ...
|
||||
# |-- s40
|
||||
# | |-- 1.pgm
|
||||
# | |-- ...
|
||||
# | |-- 10.pgm
|
||||
#
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
if len(sys.argv) != 2:
|
||||
print "usage: create_csv <base_path>"
|
||||
sys.exit(1)
|
||||
|
||||
BASE_PATH=sys.argv[1]
|
||||
SEPARATOR=";"
|
||||
|
||||
label = 0
|
||||
for dirname, dirnames, filenames in os.walk(BASE_PATH):
|
||||
for subdirname in dirnames:
|
||||
subject_path = os.path.join(dirname, subdirname)
|
||||
for filename in os.listdir(subject_path):
|
||||
abs_path = "%s/%s" % (subject_path, filename)
|
||||
print "%s%s%d" % (abs_path, SEPARATOR, label)
|
||||
label = label + 1
|
@ -1,112 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Software License Agreement (BSD License)
|
||||
#
|
||||
# Copyright (c) 2012, Philipp Wagner
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions
|
||||
# are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided
|
||||
# with the distribution.
|
||||
# * Neither the name of the author nor the names of its
|
||||
# contributors may be used to endorse or promote products derived
|
||||
# from this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
||||
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
||||
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
# POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import sys, math, os, Image  # "os" is needed below for os.path.exists/os.makedirs
|
||||
|
||||
def Distance(p1,p2):
|
||||
dx = p2[0] - p1[0]
|
||||
dy = p2[1] - p1[1]
|
||||
return math.sqrt(dx*dx+dy*dy)
|
||||
|
||||
def ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None, resample=Image.BICUBIC):
|
||||
if (scale is None) and (center is None):
|
||||
return image.rotate(angle=angle, resample=resample)
|
||||
nx,ny = x,y = center
|
||||
sx=sy=1.0
|
||||
if new_center:
|
||||
(nx,ny) = new_center
|
||||
if scale:
|
||||
(sx,sy) = (scale, scale)
|
||||
cosine = math.cos(angle)
|
||||
sine = math.sin(angle)
|
||||
a = cosine/sx
|
||||
b = sine/sx
|
||||
c = x-nx*a-ny*b
|
||||
d = -sine/sy
|
||||
e = cosine/sy
|
||||
f = y-nx*d-ny*e
|
||||
return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)
|
||||
|
||||
def CropFace(image, eye_left=(0,0), eye_right=(0,0), offset_pct=(0.2,0.2), dest_sz = (70,70)):
|
||||
# calculate offsets in original image
|
||||
offset_h = math.floor(float(offset_pct[0])*dest_sz[0])
|
||||
offset_v = math.floor(float(offset_pct[1])*dest_sz[1])
|
||||
# get the direction
|
||||
eye_direction = (eye_right[0] - eye_left[0], eye_right[1] - eye_left[1])
|
||||
# calc rotation angle in radians
|
||||
rotation = -math.atan2(float(eye_direction[1]),float(eye_direction[0]))
|
||||
# distance between them
|
||||
dist = Distance(eye_left, eye_right)
|
||||
# calculate the reference eye-width
|
||||
reference = dest_sz[0] - 2.0*offset_h
|
||||
# scale factor
|
||||
scale = float(dist)/float(reference)
|
||||
# rotate original around the left eye
|
||||
image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)
|
||||
# crop the rotated image
|
||||
crop_xy = (eye_left[0] - scale*offset_h, eye_left[1] - scale*offset_v)
|
||||
crop_size = (dest_sz[0]*scale, dest_sz[1]*scale)
|
||||
image = image.crop((int(crop_xy[0]), int(crop_xy[1]), int(crop_xy[0]+crop_size[0]), int(crop_xy[1]+crop_size[1])))
|
||||
# resize it
|
||||
image = image.resize(dest_sz, Image.ANTIALIAS)
|
||||
return image
|
||||
|
||||
def readFileNames():
|
||||
try:
|
||||
inFile = open('path_to_created_csv_file.csv')
|
||||
except:
|
||||
raise IOError('There is no file named path_to_created_csv_file.csv in current directory.')
|
||||
|
||||
|
||||
picPath = []
|
||||
picIndex = []
|
||||
|
||||
for line in inFile.readlines():
|
||||
if line != '':
|
||||
fields = line.rstrip().split(';')
|
||||
picPath.append(fields[0])
|
||||
picIndex.append(int(fields[1]))
|
||||
|
||||
return (picPath, picIndex)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
[images, indexes]=readFileNames()
|
||||
if not os.path.exists("modified"):
|
||||
os.makedirs("modified")
|
||||
for img in images:
|
||||
image = Image.open(img)
|
||||
CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.1,0.1), dest_sz=(200,200)).save("modified/"+img.rstrip().split('/')[1]+"_10_10_200_200.jpg")
|
||||
CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.2,0.2), dest_sz=(200,200)).save("modified/"+img.rstrip().split('/')[1]+"_20_20_200_200.jpg")
|
||||
CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.3,0.3), dest_sz=(200,200)).save("modified/"+img.rstrip().split('/')[1]+"_30_30_200_200.jpg")
|
||||
CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.2,0.2)).save("modified/"+img.rstrip().split('/')[1]+"_20_20_70_70.jpg")
|
@ -1,169 +0,0 @@
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */

#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;

static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc != 2) {
        cout << "usage: " << argv[0] << " <csv.ext>" << endl;
        exit(1);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::FaceRecognizer on) and the test data (which we
    // test the model with) do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an Eigenfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // This here is a full PCA, if you just want to keep
    // 10 principal components (read Eigenfaces), then call
    // the factory method like this:
    //
    //      cv::createEigenFaceRecognizer(10);
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold, call it with:
    //
    //      cv::createEigenFaceRecognizer(10, 123.0);
    //
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Sometimes you'll need to get/set internal model data,
    // which isn't exposed by the public cv::FaceRecognizer.
    // Since each cv::FaceRecognizer is derived from a
    // cv::Algorithm, you can query the data.
    //
    // First we'll use it to set the threshold of the FaceRecognizer
    // to 0.0 without retraining the model. This can be useful if
    // you are evaluating the model:
    //
    model->set("threshold", 0.0);
    // Now the threshold of this model is set to 0.0. A prediction
    // now returns -1, as it's impossible to have a distance below
    // it.
    predictedLabel = model->predict(testSample);
    cout << "Predicted class = " << predictedLabel << endl;
    // Here is how to get the eigenvalues of this Eigenfaces model:
    Mat eigenvalues = model->getMat("eigenvalues");
    // And we can do the same to display the Eigenvectors (read Eigenfaces):
    Mat W = model->getMat("eigenvectors");
    // From this we will display the (at most) first 10 Eigenfaces:
    for (int i = 0; i < min(10, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Jet colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
        imshow(format("%d", i), cgrayscale);
    }
    waitKey(0);

    return 0;
}
@ -1,193 +0,0 @@
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */

#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;

static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
        exit(1);
    }
    string output_folder = ".";
    if (argc == 3) {
        output_folder = string(argv[2]);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::FaceRecognizer on) and the test data (which we
    // test the model with) do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an Eigenfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // This here is a full PCA, if you just want to keep
    // 10 principal components (read Eigenfaces), then call
    // the factory method like this:
    //
    //      cv::createEigenFaceRecognizer(10);
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold (e.g. 123.0), call it with:
    //
    //      cv::createEigenFaceRecognizer(10, 123.0);
    //
    // If you want to use _all_ Eigenfaces and have a threshold,
    // then call the method like this:
    //
    //      cv::createEigenFaceRecognizer(0, 123.0);
    //
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Here is how to get the eigenvalues of this Eigenfaces model:
    Mat eigenvalues = model->getMat("eigenvalues");
    // And we can do the same to display the Eigenvectors (read Eigenfaces):
    Mat W = model->getMat("eigenvectors");
    // Get the sample mean from the training data
    Mat mean = model->getMat("mean");
    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the Eigenfaces:
    for (int i = 0; i < min(10, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Jet colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/eigenface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }

    // Display or save the image reconstruction at some predefined steps:
    for(int num_components = min(W.cols, 10); num_components < min(W.cols, 300); num_components+=15) {
        // slice the eigenvectors from the model
        Mat evs = Mat(W, Range::all(), Range(0, num_components));
        Mat projection = subspaceProject(evs, mean, images[0].reshape(1,1));
        Mat reconstruction = subspaceReconstruct(evs, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
        } else {
            imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
        }
    }
    // Display if we are not writing to an output folder:
    if(argc == 2) {
        waitKey(0);
    }
    return 0;
}
@ -1,191 +0,0 @@
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */

#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;

static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
        exit(1);
    }
    string output_folder = ".";
    if (argc == 3) {
        output_folder = string(argv[2]);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::FaceRecognizer on) and the test data (which we
    // test the model with) do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create a Fisherfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // If you just want to keep 10 Fisherfaces, then call
    // the factory method like this:
    //
    //      cv::createFisherFaceRecognizer(10);
    //
    // However it is not useful to discard Fisherfaces! Please
    // always try to use _all_ available Fisherfaces for
    // classification.
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold (e.g. 123.0) and use _all_
    // Fisherfaces, then call it with:
    //
    //      cv::createFisherFaceRecognizer(0, 123.0);
    //
    Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Here is how to get the eigenvalues of this Fisherfaces model:
    Mat eigenvalues = model->getMat("eigenvalues");
    // And we can do the same to display the Eigenvectors (read Fisherfaces):
    Mat W = model->getMat("eigenvectors");
    // Get the sample mean from the training data
    Mat mean = model->getMat("mean");
    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the first, at most 16 Fisherfaces:
    for (int i = 0; i < min(16, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Bone colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_BONE);
        // Display or save:
        if(argc == 2) {
            imshow(format("fisherface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/fisherface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }
    // Display or save the image reconstruction at some predefined steps:
    for(int num_component = 0; num_component < min(16, W.cols); num_component++) {
        // Slice the Fisherface from the model:
        Mat ev = W.col(num_component);
        Mat projection = subspaceProject(ev, mean, images[0].reshape(1,1));
        Mat reconstruction = subspaceReconstruct(ev, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("fisherface_reconstruction_%d", num_component), reconstruction);
        } else {
            imwrite(format("%s/fisherface_reconstruction_%d.png", output_folder.c_str(), num_component), reconstruction);
        }
    }
    // Display if we are not writing to an output folder:
    if(argc == 2) {
        waitKey(0);
    }
    return 0;
}
@ -1,155 +0,0 @@
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */

#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc != 2) {
        cout << "usage: " << argv[0] << " <csv.ext>" << endl;
        exit(1);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::FaceRecognizer on) and the test data (which we
    // test the model with) do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an LBPH model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    //
    // The LBPHFaceRecognizer uses Extended Local Binary Patterns
    // (it's probably configurable with other operators at a later
    // point), and has the following default values
    //
    //      radius = 1
    //      neighbors = 8
    //      grid_x = 8
    //      grid_y = 8
    //
    // So if you want an LBPH FaceRecognizer using a radius of
    // 2 and 16 neighbors, call the factory method with:
    //
    //      cv::createLBPHFaceRecognizer(2, 16);
    //
    // And if you want a threshold (e.g. 123.0) together with the
    // default parameters, call it with:
    //
    //      cv::createLBPHFaceRecognizer(1,8,8,8,123.0)
    //
    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Sometimes you'll need to get/set internal model data,
    // which isn't exposed by the public cv::FaceRecognizer.
    // Since each cv::FaceRecognizer is derived from a
    // cv::Algorithm, you can query the data.
    //
    // First we'll use it to set the threshold of the FaceRecognizer
    // to 0.0 without retraining the model. This can be useful if
    // you are evaluating the model:
    //
    model->set("threshold", 0.0);
    // Now the threshold of this model is set to 0.0. A prediction
    // now returns -1, as it's impossible to have a distance below
    // it.
    predictedLabel = model->predict(testSample);
    cout << "Predicted class = " << predictedLabel << endl;
    // Show some information about the model, as there's no cool
    // model data to display as in Eigenfaces/Fisherfaces.
    // Due to efficiency reasons the LBP images are not stored
    // within the model:
    cout << "Model Information:" << endl;
    string model_info = format("\tLBPH(radius=%i, neighbors=%i, grid_x=%i, grid_y=%i, threshold=%.2f)",
            model->getInt("radius"),
            model->getInt("neighbors"),
            model->getInt("grid_x"),
            model->getInt("grid_y"),
            model->getDouble("threshold"));
    cout << model_info << endl;
    // We could get the histograms for example:
    vector<Mat> histograms = model->getMatVector("histograms");
    // But should I really visualize it? Probably the length is interesting:
    cout << "Size of the histograms: " << histograms[0].total() << endl;
    return 0;
}
@ -1,200 +0,0 @@
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */

#include "opencv2/contrib.hpp"
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;

static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
        exit(1);
    }
    string output_folder = ".";
    if (argc == 3) {
        output_folder = string(argv[2]);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::FaceRecognizer on) and the test data (which we
    // test the model with) do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an Eigenfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // This here is a full PCA, if you just want to keep
    // 10 principal components (read Eigenfaces), then call
    // the factory method like this:
    //
    //      cv::createEigenFaceRecognizer(10);
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold (e.g. 123.0), call it with:
    //
    //      cv::createEigenFaceRecognizer(10, 123.0);
    //
    // If you want to use _all_ Eigenfaces and have a threshold,
    // then call the method like this:
    //
    //      cv::createEigenFaceRecognizer(0, 123.0);
    //
    Ptr<FaceRecognizer> model0 = createEigenFaceRecognizer();
    model0->train(images, labels);
    // save the model to eigenfaces_at.yml
    model0->save("eigenfaces_at.yml");
    //
    //
    // Now create a new Eigenfaces Recognizer
    //
    Ptr<FaceRecognizer> model1 = createEigenFaceRecognizer();
    model1->load("eigenfaces_at.yml");
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model1->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Here is how to get the eigenvalues of this Eigenfaces model:
    Mat eigenvalues = model1->getMat("eigenvalues");
    // And we can do the same to display the Eigenvectors (read Eigenfaces):
    Mat W = model1->getMat("eigenvectors");
    // Get the sample mean from the training data
    Mat mean = model1->getMat("mean");
    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the Eigenfaces:
    for (int i = 0; i < min(10, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Jet colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/eigenface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }
    // Display or save the image reconstruction at some predefined steps
    // (clamped to the number of available components, so small models
    // don't index past the end of W):
    for(int num_components = min(W.cols, 10); num_components < min(W.cols, 300); num_components+=15) {
        // slice the eigenvectors from the model
        Mat evs = Mat(W, Range::all(), Range(0, num_components));
        Mat projection = subspaceProject(evs, mean, images[0].reshape(1,1));
        Mat reconstruction = subspaceReconstruct(evs, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
        } else {
            imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
        }
    }
    // Display if we are not writing to an output folder:
    if(argc == 2) {
        waitKey(0);
    }
    return 0;
}
@ -1,152 +0,0 @@
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */

#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc != 4) {
        cout << "usage: " << argv[0] << " </path/to/haar_cascade> </path/to/csv.ext> <device id>" << endl;
        cout << "\t </path/to/haar_cascade> -- Path to the Haar Cascade for face detection." << endl;
        cout << "\t </path/to/csv.ext> -- Path to the CSV file with the face database." << endl;
        cout << "\t <device id> -- The webcam device id to grab frames from." << endl;
        exit(1);
    }
    // Get the path to your CSV:
    string fn_haar = string(argv[1]);
    string fn_csv = string(argv[2]);
    int deviceId = atoi(argv[3]);
    // These vectors hold the images and corresponding labels:
    vector<Mat> images;
    vector<int> labels;
    // Read in the data (fails if no valid input filename is given, but you'll get an error message):
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Get the width and height from the first image. We'll need this
    // later to reshape incoming faces to this size:
    int im_width = images[0].cols;
    int im_height = images[0].rows;
    // Create a FaceRecognizer and train it on the given images:
    Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
    model->train(images, labels);
    // That's it for learning the Face Recognition model. You now
    // need to create the classifier for the task of Face Detection.
    // We are going to use the haar cascade you have specified in the
    // command line arguments:
    //
    CascadeClassifier haar_cascade;
    haar_cascade.load(fn_haar);
    // Get a handle to the Video device:
    VideoCapture cap(deviceId);
    // Check if we can use this device at all:
    if(!cap.isOpened()) {
        cerr << "Capture Device ID " << deviceId << " cannot be opened." << endl;
        return -1;
    }
    // Holds the current frame from the Video device:
    Mat frame;
    for(;;) {
        cap >> frame;
        // Clone the current frame:
        Mat original = frame.clone();
        // Convert the current frame to grayscale:
        Mat gray;
        cvtColor(original, gray, COLOR_BGR2GRAY);
        // Find the faces in the frame:
        vector< Rect_<int> > faces;
        haar_cascade.detectMultiScale(gray, faces);
        // At this point you have the position of the faces in
        // faces. Now we'll get the faces, make a prediction and
        // annotate it in the video. Cool or what?
        for(size_t i = 0; i < faces.size(); i++) {
            // Process face by face:
            Rect face_i = faces[i];
            // Crop the face from the image. So simple with OpenCV C++:
            Mat face = gray(face_i);
            // Resizing the face is necessary for Eigenfaces and Fisherfaces. You can easily
            // verify this, by reading through the face recognition tutorial coming with OpenCV.
            // Resizing IS NOT NEEDED for Local Binary Patterns Histograms, so preparing the
            // input data really depends on the algorithm used.
            //
            // I strongly encourage you to play around with the algorithms. See which work best
            // in your scenario, LBPH should always be a contender for robust face recognition.
            //
            // Since I am showing the Fisherfaces algorithm here, I also show how to resize the
            // face you have just found:
            Mat face_resized;
            cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
            // Now perform the prediction, see how easy that is:
            int prediction = model->predict(face_resized);
            // And finally write all we've found out to the original image!
            // First of all draw a green rectangle around the detected face:
            rectangle(original, face_i, CV_RGB(0, 255, 0), 1);
            // Create the text we will annotate the box with:
            string box_text = format("Prediction = %d", prediction);
            // Calculate the position for annotated text (make sure we don't
            // put illegal values in there):
            int pos_x = std::max(face_i.tl().x - 10, 0);
            int pos_y = std::max(face_i.tl().y - 10, 0);
            // And now put it into the image:
            putText(original, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0, 255, 0), 2);
        }
        // Show the result:
        imshow("face_recognizer", original);
        // Grab a key press (waitKey also drives the HighGUI event loop):
        char key = (char) waitKey(20);
        // Exit this loop on escape:
        if(key == 27)
            break;
    }
    return 0;
}
@ -1,233 +0,0 @@
Gender Classification with OpenCV
=================================

.. contents:: Table of Contents
   :depth: 3

Introduction
------------

A lot of people interested in face recognition also want to know how to perform image classification tasks like:

* Gender Classification (Gender Detection)
* Emotion Classification (Emotion Detection)
* Glasses Classification (Glasses Detection)
* ...

This has become very, very easy with the new :ocv:class:`FaceRecognizer` class. In this tutorial I'll show you how to perform gender classification with OpenCV on a set of face images. You'll also learn how to align your images to enhance the recognition results. If you want to do emotion classification instead of gender classification, all you need to do is update your training data and the configuration you pass to the demo.

Prerequisites
--------------

For gender classification of faces, you'll need some images of male and female faces first. I've decided to search for faces of celebrities using `Google Images <http://www.google.com/images>`_ with the faces filter turned on (my god, they have great algorithms at `Google <http://www.google.com>`_!). My database has 8 male and 5 female subjects, each with 10 images. Here are the names, if you don't know who to search for:

* Angelina Jolie
* Arnold Schwarzenegger
* Brad Pitt
* Emma Watson
* George Clooney
* Jennifer Lopez
* Johnny Depp
* Justin Timberlake
* Katy Perry
* Keanu Reeves
* Naomi Watts
* Patrick Stewart
* Tom Cruise

Once you have acquired some images, you'll need to read them. In the demo application I have decided to read the images from a very simple CSV file. Why? Because it's the simplest platform-independent approach I can think of. However, if you know a simpler solution please ping me about it. Basically all the CSV file needs to contain are lines composed of a ``filename`` followed by a ``;`` followed by the ``label`` (as *integer number*), making up a line like this:

.. code-block:: none

    /path/to/image.ext;0

Let's dissect the line. ``/path/to/image.ext`` is the path to an image, probably something like this if you are in Windows: ``C:/faces/person0/image0.jpg``. Then there is the separator ``;`` and finally we assign a label ``0`` to the image. Think of the label as the subject (the person, the gender or whatever comes to your mind). In the gender classification scenario, the label is the gender the person has. I'll give the label ``0`` to *male* persons and the label ``1`` to *female* subjects. So my CSV file looks like this:

.. code-block:: none

    /home/philipp/facerec/data/gender/male/keanu_reeves/keanu_reeves_01.jpg;0
    /home/philipp/facerec/data/gender/male/keanu_reeves/keanu_reeves_02.jpg;0
    /home/philipp/facerec/data/gender/male/keanu_reeves/keanu_reeves_03.jpg;0
    ...
    /home/philipp/facerec/data/gender/female/katy_perry/katy_perry_01.jpg;1
    /home/philipp/facerec/data/gender/female/katy_perry/katy_perry_02.jpg;1
    /home/philipp/facerec/data/gender/female/katy_perry/katy_perry_03.jpg;1
    ...
    /home/philipp/facerec/data/gender/male/brad_pitt/brad_pitt_01.jpg;0
    /home/philipp/facerec/data/gender/male/brad_pitt/brad_pitt_02.jpg;0
    /home/philipp/facerec/data/gender/male/brad_pitt/brad_pitt_03.jpg;0
    ...
    /home/philipp/facerec/data/gender/female/emma_watson/emma_watson_08.jpg;1
    /home/philipp/facerec/data/gender/female/emma_watson/emma_watson_02.jpg;1
    /home/philipp/facerec/data/gender/female/emma_watson/emma_watson_03.jpg;1

All images for this example were chosen to have a frontal face perspective. They have been cropped, scaled and rotated to be aligned at the eyes, just like this set of George Clooney images:

.. image:: ../img/tutorial/gender_classification/clooney_set.png
   :align: center

You really don't want to create the CSV file by hand. And you really don't want to scale, rotate & translate the images manually. I have prepared two Python scripts for you, ``create_csv.py`` and ``crop_face.py``; you can find them in the ``src`` folder coming with this documentation. You'll see how to use them in the :ref:`appendixfgc`.

Fisherfaces for Gender Classification
--------------------------------------

If you want to decide whether a person is *male* or *female*, you have to learn the discriminative features of both classes. The Eigenfaces method is based on Principal Component Analysis, which is an unsupervised statistical model and not suitable for this task (please see the Face Recognition tutorial for insights into the algorithms). The Fisherfaces method instead yields a class-specific linear projection, so it is much better suited for the gender classification task. `http://www.bytefish.de/blog/gender_classification <http://www.bytefish.de/blog/gender_classification>`_ shows the recognition rate of the Fisherfaces method for gender classification.

The Fisherfaces method achieves a 98% recognition rate in a subject-independent cross-validation. A subject-independent cross-validation means *images of the person under test are never used for learning the model*. And could you believe it: you can simply use the facerec_fisherfaces demo that's included in OpenCV.
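At its core, gender classification with :ocv:class:`FaceRecognizer` is just two-class face recognition. Here is a minimal sketch of that idea (it assumes ``images`` and ``labels`` were filled by the ``read_csv`` helper used by all the demos, with labels ``0``/``1`` as described above, and that ``testSample`` is a held-out face image; the complete program is the ``facerec_fisherfaces.cpp`` demo in the next section):

.. code-block:: cpp

    // Gender classification as two-class face recognition:
    // label 0 = male, label 1 = female, read from the CSV above.
    vector<Mat> images;
    vector<int> labels;
    read_csv("gender.csv", images, labels);  // "gender.csv" is a placeholder path
    Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
    model->train(images, labels);
    int predicted = model->predict(testSample);  // yields 0 (male) or 1 (female)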
Fisherfaces in OpenCV
---------------------

The source code for this demo application is also available in the ``src`` folder coming with this documentation:

* :download:`src/facerec_fisherfaces.cpp <../src/facerec_fisherfaces.cpp>`

.. literalinclude:: ../src/facerec_fisherfaces.cpp
   :language: cpp
   :linenos:

Running the Demo
----------------

If you are in Windows, then simply start the demo by running (from command line):

.. code-block:: none

    facerec_fisherfaces.exe C:/path/to/your/csv.ext

If you are in Linux, then simply start the demo by running:

.. code-block:: none

    ./facerec_fisherfaces /path/to/your/csv.ext

If you don't want to display the images, but save them, then pass the desired path to the demo. It works like this in Windows:

.. code-block:: none

    facerec_fisherfaces.exe C:/path/to/your/csv.ext C:/path/to/store/results/at

And in Linux:

.. code-block:: none

    ./facerec_fisherfaces /path/to/your/csv.ext /path/to/store/results/at

Results
-------

If you run the program with your CSV file as a parameter, you'll see the Fisherface that separates between male and female images. I've decided to apply a Jet colormap in this demo, so you can see which features the method identifies:

.. image:: ../img/tutorial/gender_classification/fisherface_0.png

The demo also shows the average face of the male and female training images you have passed:

.. image:: ../img/tutorial/gender_classification/mean.png

Moreover, the demo should yield the prediction for the correct gender:

.. code-block:: none

    Predicted class = 1 / Actual class = 1.

And for advanced users I have also shown the Eigenvalue for the Fisherface:

.. code-block:: none

    Eigenvalue #0 = 152.49493

And the Fisherfaces reconstruction:

.. image:: ../img/tutorial/gender_classification/fisherface_reconstruction_0.png

I hope this gives you an idea how to approach gender classification and the other image classification tasks.

.. _appendixfgc:

Appendix
--------

Creating the CSV File
+++++++++++++++++++++

You don't really want to create the CSV file by hand. I have prepared a little Python script ``create_csv.py`` for you (you'll find it at ``/src/create_csv.py``, coming with this tutorial) that automatically creates the CSV file. If you have your images in a hierarchy like this (``/basepath/<subject>/<image.ext>``):

.. code-block:: none

    philipp@mango:~/facerec/data/at$ tree
    .
    |-- s1
    |   |-- 1.pgm
    |   |-- ...
    |   |-- 10.pgm
    |-- s2
    |   |-- 1.pgm
    |   |-- ...
    |   |-- 10.pgm
    ...
    |-- s40
    |   |-- 1.pgm
    |   |-- ...
    |   |-- 10.pgm

Then simply call ``create_csv.py`` with the path to the folder, just like this, and you can save the output:

.. code-block:: none

    philipp@mango:~/facerec/data$ python create_csv.py
    at/s13/2.pgm;0
    at/s13/7.pgm;0
    at/s13/6.pgm;0
    at/s13/9.pgm;0
    at/s13/5.pgm;0
    at/s13/3.pgm;0
    at/s13/4.pgm;0
    at/s13/10.pgm;0
    at/s13/8.pgm;0
    at/s13/1.pgm;0
    at/s17/2.pgm;1
    at/s17/7.pgm;1
    at/s17/6.pgm;1
    at/s17/9.pgm;1
    at/s17/5.pgm;1
    at/s17/3.pgm;1
    [...]

Here is the script, if you can't find it:

.. literalinclude:: ../src/create_csv.py
   :language: python
   :linenos:

Aligning Face Images
++++++++++++++++++++

An accurate alignment of your image data is especially important in tasks like emotion detection, where you need as much detail as possible. Believe me... you don't want to do this by hand. So I've prepared a tiny Python script for you. The code is really easy to use. To scale, rotate and crop the face image you just need to call *CropFace(image, eye_left, eye_right, offset_pct, dest_sz)*, where:

* *eye_left* is the position of the left eye
* *eye_right* is the position of the right eye
* *offset_pct* is the percent of the image you want to keep next to the eyes (horizontal, vertical direction)
* *dest_sz* is the size of the output image

If you are using the same *offset_pct* and *dest_sz* for your images, they are all aligned at the eyes.

.. literalinclude:: ../src/crop_face.py
   :language: python
   :linenos:

Imagine we are given `this photo of Arnold Schwarzenegger <http://en.wikipedia.org/wiki/File:Arnold_Schwarzenegger_edit%28ws%29.jpg>`_, which is under a Public Domain license. The (x,y)-position of the eyes is approximately *(252,364)* for the left and *(420,366)* for the right eye. Now you only need to define the horizontal offset, vertical offset and the size your scaled, rotated & cropped face should have.

Here are some examples:

+---------------------------------+----------------------------------------------------------------------------+
| Configuration                   | Cropped, Scaled, Rotated Face                                              |
+=================================+============================================================================+
| 0.1 (10%), 0.1 (10%), (200,200) | .. image:: ../img/tutorial/gender_classification/arnie_10_10_200_200.jpg   |
+---------------------------------+----------------------------------------------------------------------------+
| 0.2 (20%), 0.2 (20%), (200,200) | .. image:: ../img/tutorial/gender_classification/arnie_20_20_200_200.jpg   |
+---------------------------------+----------------------------------------------------------------------------+
| 0.3 (30%), 0.3 (30%), (200,200) | .. image:: ../img/tutorial/gender_classification/arnie_30_30_200_200.jpg   |
+---------------------------------+----------------------------------------------------------------------------+
| 0.2 (20%), 0.2 (20%), (70,70)   | .. image:: ../img/tutorial/gender_classification/arnie_20_20_70_70.jpg     |
+---------------------------------+----------------------------------------------------------------------------+
@ -1,46 +0,0 @@
Saving and Loading a FaceRecognizer
===================================

Introduction
------------

Saving and loading a :ocv:class:`FaceRecognizer` is very important. Training a FaceRecognizer can be a very time-consuming task, plus it's often impossible to ship the whole face database to the user of your product. Fortunately, the task is easy: you only have to call :ocv:func:`FaceRecognizer::load` for loading and :ocv:func:`FaceRecognizer::save` for saving a :ocv:class:`FaceRecognizer`.

I'll adapt the Eigenfaces example from the :doc:`../facerec_tutorial`: Imagine we want to learn the Eigenfaces of the `AT&T Facedatabase <http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html>`_, store the model to a YAML file and then load it again.

From the loaded model, we'll get a prediction, show the mean, Eigenfaces and the image reconstruction.
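The core of it fits in a few lines. Here is a minimal sketch (not the full demo below; it assumes ``images``, ``labels`` and ``testSample`` were prepared with the ``read_csv`` helper used throughout these tutorials):

.. code-block:: cpp

    // Train a model and serialize its state to YAML:
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(images, labels);
    model->save("eigenfaces_at.yml");

    // Restore the state (num_components, mean, eigenvectors, ...)
    // into a fresh model and use it for prediction:
    Ptr<FaceRecognizer> loaded = createEigenFaceRecognizer();
    loaded->load("eigenfaces_at.yml");
    int predicted = loaded->predict(testSample);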
Using FaceRecognizer::save and FaceRecognizer::load
-----------------------------------------------------

The source code for this demo application is also available in the ``src`` folder coming with this documentation:

* :download:`src/facerec_save_load.cpp <../src/facerec_save_load.cpp>`

.. literalinclude:: ../src/facerec_save_load.cpp
   :language: cpp
   :linenos:

Results
-------

``eigenfaces_at.yml`` then contains the model state; we'll simply look at the first 10 lines with ``head eigenfaces_at.yml``:

.. code-block:: none

    philipp@mango:~/github/libfacerec-build$ head eigenfaces_at.yml
    %YAML:1.0
    num_components: 399
    mean: !!opencv-matrix
       rows: 1
       cols: 10304
       dt: d
       data: [ 8.5558897243107765e+01, 8.5511278195488714e+01,
           8.5854636591478695e+01, 8.5796992481203006e+01,
           8.5952380952380949e+01, 8.6162907268170414e+01,
           8.6082706766917283e+01, 8.5776942355889716e+01,

And here is the reconstruction, which is the same as the original:

.. image:: ../img/eigenface_reconstruction_opencv.png
   :align: center
@ -1,207 +0,0 @@
|
||||
Face Recognition in Videos with OpenCV
|
||||
=======================================
|
||||
|
||||
.. contents:: Table of Contents
|
||||
:depth: 3
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
Whenever you hear the term *face recognition*, you instantly think of surveillance in videos. So performing face recognition in videos (e.g. webcam) is one of the most requested features I have got. I have heard your cries, so here it is. An application, that shows you how to do face recognition in videos! For the face detection part we'll use the awesome :ocv:class:`CascadeClassifier` and we'll use :ocv:class:`FaceRecognizer` for face recognition. This example uses the Fisherfaces method for face recognition, because it is robust against large changes in illumination.
|
||||
|
||||
Here is what the final application looks like. As you can see, I am only writing the id of the recognized person above the detected face (by the way, this id is Arnold Schwarzenegger for my data set):
|
||||
|
||||
.. image:: ../img/tutorial/facerec_video/facerec_video.png
|
||||
:align: center
|
||||
:scale: 70%
|
||||
|
||||
This demo is a basis for your research and it shows you how to implement face recognition in videos. You probably want to extend the application and make it more sophisticated: you could combine the id with the name, show the confidence of the prediction, recognize the emotion, and so on. But before you send mails asking what this Haar-Cascade thing is or what a CSV is: make sure you have read the entire tutorial. It's all explained in here. If you just want to scroll down to the code, please note:
|
||||
|
||||
* The available Haar-Cascades for face detection are located in the ``data`` folder of your OpenCV installation! One example is ``/path/to/opencv/data/haarcascades/haarcascade_frontalface_default.xml``.
|
||||
|
||||
I encourage you to experiment with the application. Play around with the available :ocv:class:`FaceRecognizer` implementations, try the available cascades in OpenCV and see if you can improve your results!
|
||||
|
||||
Prerequisites
|
||||
--------------
|
||||
|
||||
You want to do face recognition, so you need some face images to learn a :ocv:class:`FaceRecognizer` on. I have decided to reuse the images from the gender classification example: :doc:`facerec_gender_classification`.
|
||||
|
||||
I have the following celebrities in my training data set:
|
||||
|
||||
* Angelina Jolie
|
||||
* Arnold Schwarzenegger
|
||||
* Brad Pitt
|
||||
* George Clooney
|
||||
* Johnny Depp
|
||||
* Justin Timberlake
|
||||
* Katy Perry
|
||||
* Keanu Reeves
|
||||
* Patrick Stewart
|
||||
* Tom Cruise
|
||||
|
||||
In the demo I have decided to read the images from a very simple CSV file. Why? Because it's the simplest platform-independent approach I can think of. However, if you know a simpler solution, please ping me about it. Basically, all the CSV file needs to contain is lines composed of a ``filename``, followed by a ``;``, followed by the ``label`` (as an *integer number*), making up a line like this:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
/path/to/image.ext;0
|
||||
|
||||
Let's dissect the line. ``/path/to/image.ext`` is the path to an image, probably something like this if you are in Windows: ``C:/faces/person0/image0.jpg``. Then there is the separator ``;`` and finally we assign a label ``0`` to the image. Think of the label as the subject (the person, the gender or whatever comes to your mind). In the face recognition scenario, the label is the person this image belongs to. In the gender classification scenario, the label is the person's gender. So my CSV file looks like this:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
/home/philipp/facerec/data/c/keanu_reeves/keanu_reeves_01.jpg;0
|
||||
/home/philipp/facerec/data/c/keanu_reeves/keanu_reeves_02.jpg;0
|
||||
/home/philipp/facerec/data/c/keanu_reeves/keanu_reeves_03.jpg;0
|
||||
...
|
||||
/home/philipp/facerec/data/c/katy_perry/katy_perry_01.jpg;1
|
||||
/home/philipp/facerec/data/c/katy_perry/katy_perry_02.jpg;1
|
||||
/home/philipp/facerec/data/c/katy_perry/katy_perry_03.jpg;1
|
||||
...
|
||||
/home/philipp/facerec/data/c/brad_pitt/brad_pitt_01.jpg;2
|
||||
/home/philipp/facerec/data/c/brad_pitt/brad_pitt_02.jpg;2
|
||||
/home/philipp/facerec/data/c/brad_pitt/brad_pitt_03.jpg;2
|
||||
...
|
||||
/home/philipp/facerec/data/c1/crop_arnold_schwarzenegger/crop_08.jpg;6
|
||||
/home/philipp/facerec/data/c1/crop_arnold_schwarzenegger/crop_05.jpg;6
|
||||
/home/philipp/facerec/data/c1/crop_arnold_schwarzenegger/crop_02.jpg;6
|
||||
/home/philipp/facerec/data/c1/crop_arnold_schwarzenegger/crop_03.jpg;6
|
||||
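For illustration, here is a minimal sketch of how such a file can be parsed (the demo source below ships a very similar helper; includes such as ``<fstream>`` and ``<sstream>`` and all error handling are omitted):

.. code-block:: cpp

    // Read "<path>;<label>" lines into parallel image/label vectors.
    void read_csv(const std::string& filename, std::vector<cv::Mat>& images, std::vector<int>& labels) {
        std::ifstream file(filename.c_str());
        std::string line, path, classlabel;
        while (std::getline(file, line)) {
            std::stringstream liness(line);
            std::getline(liness, path, ';');
            std::getline(liness, classlabel);
            if (!path.empty() && !classlabel.empty()) {
                images.push_back(cv::imread(path, 0)); // 0 = load as grayscale
                labels.push_back(atoi(classlabel.c_str()));
            }
        }
    }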
|
||||
All images for this example were chosen to have a frontal face perspective. They have been cropped, scaled and rotated to be aligned at the eyes, just like this set of George Clooney images:
|
||||
|
||||
.. image:: ../img/tutorial/gender_classification/clooney_set.png
|
||||
:align: center
|
||||
|
||||
Face Recognition from Videos
|
||||
-----------------------------
|
||||
|
||||
The source code for the demo is available in the ``src`` folder coming with this documentation:
|
||||
|
||||
* :download:`src/facerec_video.cpp <../src/facerec_video.cpp>`
|
||||
|
||||
This demo uses the :ocv:class:`CascadeClassifier`:
|
||||
|
||||
.. literalinclude:: ../src/facerec_video.cpp
|
||||
:language: cpp
|
||||
:linenos:
|
||||
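The heart of it is the per-frame loop, sketched here in isolation (``haar_cascade`` is the loaded :ocv:class:`CascadeClassifier`, ``model`` the trained :ocv:class:`FaceRecognizer`, ``gray``/``frame`` the current grayscale and color frames, and ``im_width``/``im_height`` the size of the training images):

.. code-block:: cpp

    // Detect faces in the grayscale frame, then predict each one.
    std::vector<cv::Rect> faces;
    haar_cascade.detectMultiScale(gray, faces);
    for (size_t i = 0; i < faces.size(); i++) {
        // Crop and resize the face to the size the model was trained on.
        cv::Mat face_resized;
        cv::resize(gray(faces[i]), face_resized, cv::Size(im_width, im_height), 1.0, 1.0, cv::INTER_CUBIC);
        int prediction = model->predict(face_resized);
        // Draw the detection and write the predicted id above it.
        cv::rectangle(frame, faces[i], CV_RGB(0, 255, 0), 1);
        cv::putText(frame, cv::format("Prediction = %d", prediction),
                    cv::Point(faces[i].x, std::max(faces[i].y - 10, 0)),
                    cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0, 255, 0), 2);
    }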
|
||||
Running the Demo
|
||||
----------------
|
||||
|
||||
You'll need:
|
||||
|
||||
* The path to a valid Haar-Cascade for detecting a face with a :ocv:class:`CascadeClassifier`.
|
||||
* The path to a valid CSV File for learning a :ocv:class:`FaceRecognizer`.
|
||||
* A webcam and its device id (you don't know the device id? Simply start from 0 on and see what happens, or use the probe sketch below).
|
||||
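If in doubt about the device id, a throwaway probe loop will tell you which ids open:

.. code-block:: cpp

    // Try the first few device ids and report which ones work.
    for (int id = 0; id < 4; id++) {
        cv::VideoCapture cap(id);
        if (cap.isOpened())
            std::cout << "Device " << id << " is available." << std::endl;
    }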
|
||||
If you are in Windows, then simply start the demo by running (from the command line):
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
facerec_video.exe <C:/path/to/your/haar_cascade.xml> <C:/path/to/your/csv.ext> <video device>
|
||||
|
||||
If you are in Linux, then simply start the demo by running:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
./facerec_video </path/to/your/haar_cascade.xml> </path/to/your/csv.ext> <video device>
|
||||
|
||||
An example: if the Haar-Cascade is at ``C:/opencv/data/haarcascades/haarcascade_frontalface_default.xml``, the CSV file is at ``C:/facerec/data/celebrities.txt`` and I have a webcam with device id ``1``, then I would call the demo with:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
facerec_video.exe C:/opencv/data/haarcascades/haarcascade_frontalface_default.xml C:/facerec/data/celebrities.txt 1
|
||||
|
||||
That's it.
|
||||
|
||||
Results
|
||||
-------
|
||||
|
||||
Enjoy!
|
||||
|
||||
Appendix
|
||||
--------
|
||||
|
||||
Creating the CSV File
|
||||
+++++++++++++++++++++
|
||||
|
||||
You don't really want to create the CSV file by hand. I have prepared you a little Python script ``create_csv.py`` (you'll find it at ``/src/create_csv.py``, coming with this tutorial) that automatically creates a CSV file for you. If you have your images in a hierarchy like this (``/basepath/<subject>/<image.ext>``):
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
philipp@mango:~/facerec/data/at$ tree
|
||||
.
|
||||
|-- s1
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
|-- s2
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
...
|
||||
|-- s40
|
||||
| |-- 1.pgm
|
||||
| |-- ...
|
||||
| |-- 10.pgm
|
||||
|
||||
|
||||
Then simply call ``create_csv.py`` with the path to the folder, just like this, and redirect the output to a file if you want to save it:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
philipp@mango:~/facerec/data$ python create_csv.py
|
||||
at/s13/2.pgm;0
|
||||
at/s13/7.pgm;0
|
||||
at/s13/6.pgm;0
|
||||
at/s13/9.pgm;0
|
||||
at/s13/5.pgm;0
|
||||
at/s13/3.pgm;0
|
||||
at/s13/4.pgm;0
|
||||
at/s13/10.pgm;0
|
||||
at/s13/8.pgm;0
|
||||
at/s13/1.pgm;0
|
||||
at/s17/2.pgm;1
|
||||
at/s17/7.pgm;1
|
||||
at/s17/6.pgm;1
|
||||
at/s17/9.pgm;1
|
||||
at/s17/5.pgm;1
|
||||
at/s17/3.pgm;1
|
||||
[...]
|
||||
|
||||
Here is the script, if you can't find it:
|
||||
|
||||
.. literalinclude:: ../src/create_csv.py
|
||||
:language: python
|
||||
:linenos:
|
||||
|
||||
Aligning Face Images
|
||||
++++++++++++++++++++
|
||||
|
||||
An accurate alignment of your image data is especially important in tasks like emotion detection, where you need as much detail as possible. Believe me... You don't want to do this by hand. So I've prepared you a tiny Python script. The code is really easy to use. To scale, rotate and crop the face image, you just need to call *CropFace(image, eye_left, eye_right, offset_pct, dest_sz)*, where:
|
||||
|
||||
* *eye_left* is the position of the left eye
|
||||
* *eye_right* is the position of the right eye
|
||||
* *offset_pct* is the percent of the image you want to keep next to the eyes (horizontal, vertical direction)
|
||||
* *dest_sz* is the size of the output image
|
||||
|
||||
If you use the same *offset_pct* and *dest_sz* for all of your images, they are all aligned at the eyes.
|
||||
|
||||
.. literalinclude:: ../src/crop_face.py
|
||||
:language: python
|
||||
:linenos:
|
||||
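If you prefer to stay in C++, the same transform can be expressed with OpenCV's affine helpers. This is only a sketch under the parameter conventions above, not part of the tutorial sources:

.. code-block:: cpp

    // Rotate about the left eye so the eyes become horizontal, scale so the
    // eye distance matches the requested offsets, then crop to destSz.
    cv::Mat cropFace(const cv::Mat& img, cv::Point2f eyeLeft, cv::Point2f eyeRight,
                     cv::Point2f offsetPct, cv::Size destSz)
    {
        double dx = eyeRight.x - eyeLeft.x, dy = eyeRight.y - eyeLeft.y;
        double angle = std::atan2(dy, dx) * 180.0 / CV_PI; // degrees
        double offsetX = offsetPct.x * destSz.width;
        double offsetY = offsetPct.y * destSz.height;
        // The eyes should span the output width minus both horizontal offsets.
        double scale = (destSz.width - 2.0 * offsetX) / std::sqrt(dx * dx + dy * dy);

        cv::Mat rot = cv::getRotationMatrix2D(eyeLeft, angle, scale);
        // Shift so the left eye lands exactly at (offsetX, offsetY).
        rot.at<double>(0, 2) += offsetX - eyeLeft.x;
        rot.at<double>(1, 2) += offsetY - eyeLeft.y;

        cv::Mat dst;
        cv::warpAffine(img, dst, rot, destSz);
        return dst;
    }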
|
||||
Imagine we are given `this photo of Arnold Schwarzenegger <http://en.wikipedia.org/wiki/File:Arnold_Schwarzenegger_edit%28ws%29.jpg>`_, which is under a Public Domain license. The (x,y)-position of the eyes is approximately *(252,364)* for the left and *(420,366)* for the right eye. Now you only need to define the horizontal offset, vertical offset and the size your scaled, rotated & cropped face should have.
|
||||
|
||||
Here are some examples:
|
||||
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
| Configuration | Cropped, Scaled, Rotated Face |
|
||||
+=================================+============================================================================+
|
||||
| 0.1 (10%), 0.1 (10%), (200,200) | .. image:: ../img/tutorial/gender_classification/arnie_10_10_200_200.jpg |
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
| 0.2 (20%), 0.2 (20%), (200,200) | .. image:: ../img/tutorial/gender_classification/arnie_20_20_200_200.jpg |
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
| 0.3 (30%), 0.3 (30%), (200,200) | .. image:: ../img/tutorial/gender_classification/arnie_30_30_200_200.jpg |
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
||||
| 0.2 (20%), 0.2 (20%), (70,70) | .. image:: ../img/tutorial/gender_classification/arnie_20_20_70_70.jpg |
|
||||
+---------------------------------+----------------------------------------------------------------------------+
|
@ -1,232 +0,0 @@
|
||||
OpenFABMAP
|
||||
========================================
|
||||
|
||||
.. highlight:: cpp
|
||||
|
||||
The openFABMAP package has been integrated into OpenCV from the `openFABMAP <http://code.google.com/p/openfabmap/>`_ project [ICRA2011]_. OpenFABMAP is an open, modifiable source-code implementation of the Fast Appearance-based Mapping algorithm (FAB-MAP) developed by Mark Cummins and Paul Newman. The algorithms used in openFABMAP were developed using only the relevant FAB-MAP publications.
|
||||
|
||||
FAB-MAP is an approach to appearance-based place recognition. FAB-MAP compares images of locations that have been visited and determines the probability of re-visiting a location, as well as providing a measure of the probability of being at a new, previously unvisited location. Camera images form the sole input to the system, from which visual bag-of-words models are formed through the extraction of appearance-based (e.g. SURF) features.
|
||||
|
||||
openFABMAP requires training data (e.g. a collection of images from a similar but not identical environment) to construct a visual vocabulary for the visual bag-of-words model and a Chow-Liu tree representation of feature likelihoods, and for use in the SAMPLED new-place method (see below).
|
||||
|
||||
.. note::
|
||||
|
||||
* An example using the openFABMAP package can be found at opencv_source_code/samples/cpp/fabmap_sample.cpp
|
||||
|
||||
of2::FabMap
|
||||
--------------------
|
||||
|
||||
.. ocv:class:: of2::FabMap
|
||||
|
||||
The main FabMap class performs the comparison between visual bags-of-words extracted from one or more images. The FabMap class is instantiated as one of the four inherited FabMap classes (FabMap1, FabMapLUT, FabMapFBO, FabMap2). Each inherited class performs the comparison differently, based on the published algorithm iterations (see each class below for specifics). A Chow-Liu tree, detector model parameters and some option flags are common to all FabMap variants and are supplied on class creation. Training data (visual bag-of-words) is supplied to the class if the SAMPLED new-place method is used. Test data (visual bag-of-words) is the set of images against which query bags-of-words are compared. The common flags are listed below (a usage sketch follows the list): ::
|
||||
|
||||
enum {
|
||||
MEAN_FIELD,
|
||||
SAMPLED,
|
||||
NAIVE_BAYES,
|
||||
CHOW_LIU,
|
||||
MOTION_MODEL
|
||||
};
|
||||
|
||||
#. MEAN_FIELD: Use the Mean Field approximation to determine the new place likelihood (cannot be used for FabMap2).
|
||||
#. SAMPLED: Use the Sampled approximation to determine the new place likelihood. Requires training data (see below).
|
||||
#. NAIVE_BAYES: Assume a naive Bayes approximation to the feature distribution (i.e. all features are independent). Note that a Chow-Liu tree is still required, but only the absolute word probabilities are used; feature co-occurrence information is discarded.
|
||||
#. CHOW_LIU: Use the full Chow-Liu tree to approximate feature distribution.
|
||||
#. MOTION_MODEL: Update the location distribution using the previous distribution as a (weak) prior. Used for matching in sequences (i.e. successive video frames).
|
||||
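To make the flags concrete, here is a minimal usage sketch (``trainData`` and ``queryData`` are bag-of-words descriptors stored as rows of a ``Mat``; the detector-model numbers are illustrative only): ::

    // Build a Chow-Liu tree from training bag-of-words data.
    of2::ChowLiuTree treeBuilder;
    treeBuilder.add(trainData);
    cv::Mat clTree = treeBuilder.make();

    // FabMap1 with the full Chow-Liu model and the sampled new place method.
    of2::FabMap1 fabmap(clTree, 0.39, 0, of2::FabMap::SAMPLED | of2::FabMap::CHOW_LIU, 3000);
    fabmap.addTraining(trainData); // required by the SAMPLED method

    // Compare a query against the test data, adding it afterwards.
    std::vector<of2::IMatch> matches;
    fabmap.compare(queryData, matches, true);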
|
||||
Training Data
|
||||
++++++++++++++++++++
|
||||
|
||||
Training data is required to use the SAMPLED new-place method. The SAMPLED method was shown to have improved performance over the alternative MEAN_FIELD method. Training data can be added singly or as a batch.
|
||||
|
||||
.. ocv:function:: virtual void addTraining(const Mat& queryImgDescriptor)
|
||||
|
||||
:param queryImgDescriptor: bag-of-words image descriptors stored as rows in a Mat
|
||||
|
||||
.. ocv:function:: virtual void addTraining(const vector<Mat>& queryImgDescriptors)
|
||||
|
||||
:param queryImgDescriptors: a vector containing multiple bag-of-words image descriptors
|
||||
|
||||
.. ocv:function:: const vector<Mat>& getTrainingImgDescriptors() const
|
||||
|
||||
Returns a vector containing multiple bag-of-words image descriptors
|
||||
|
||||
Test Data
|
||||
++++++++++++++++++++
|
||||
|
||||
Test data is the database of images represented using bag-of-words models. When a compare function is called, each query point is compared to the test data.
|
||||
|
||||
.. ocv:function:: virtual void add(const Mat& queryImgDescriptor)
|
||||
|
||||
:param queryImgDescriptor: bag-of-words image descriptors stored as rows in a Mat
|
||||
|
||||
.. ocv:function:: virtual void add(const vector<Mat>& queryImgDescriptors)
|
||||
|
||||
:param queryImgDescriptors: a vector containing multiple bag-of-words image descriptors
|
||||
|
||||
.. ocv:function:: const vector<Mat>& getTestImgDescriptors() const
|
||||
|
||||
Returns a vector containing multiple bag-of-words image descriptors
|
||||
|
||||
Image Comparison
|
||||
++++++++++++++++++++
|
||||
|
||||
Image matching is performed by calling a compare function. Query bag-of-words image descriptors are provided and compared to the test data previously added to the FabMap class. Alternatively, test data can be provided with the call to compare, in which case the comparison is performed against it. Results are written to the ``matches`` argument.
|
||||
|
||||
.. ocv:function:: void compare(const Mat& queryImgDescriptor, vector<IMatch>& matches, bool addQuery = false, const Mat& mask = Mat())
|
||||
|
||||
:param queryImgDescriptor: bag-of-words image descriptors stored as rows in a Mat
|
||||
|
||||
:param matches: a vector of image match probabilities
|
||||
|
||||
:param addQuery: if true, the queryImgDescriptor is added to the test data after the comparison is performed.
|
||||
|
||||
:param mask: *not implemented*
|
||||
|
||||
.. ocv:function:: void compare(const Mat& queryImgDescriptor, const Mat& testImgDescriptors, vector<IMatch>& matches, const Mat& mask = Mat())
|
||||
|
||||
:param testImgDescriptors: bag-of-words image descriptors stored as rows in a Mat
|
||||
|
||||
.. ocv:function:: void compare(const Mat& queryImgDescriptor, const vector<Mat>& testImgDescriptors, vector<IMatch>& matches, const Mat& mask = Mat())
|
||||
|
||||
:param testImgDescriptors: a vector of multiple bag-of-words image descriptors
|
||||
|
||||
.. ocv:function:: void compare(const vector<Mat>& queryImgDescriptors, vector<IMatch>& matches, bool addQuery = false, const Mat& mask = Mat())
|
||||
|
||||
:param queryImgDescriptors: a vector of multiple bag-of-words image descriptors
|
||||
|
||||
.. ocv:function:: void compare(const vector<Mat>& queryImgDescriptors, const vector<Mat>& testImgDescriptors, vector<IMatch>& matches, const Mat& mask = Mat())
|
||||
|
||||
|
||||
|
||||
FabMap classes
|
||||
++++++++++++++++++++
|
||||
|
||||
.. ocv:class:: FabMap1 : public FabMap
|
||||
|
||||
The original FAB-MAP algorithm without any computational improvements as published in [IJRR2008]_
|
||||
|
||||
.. ocv:function:: FabMap1::FabMap1(const Mat& clTree, double PzGe, double PzGNe, int flags, int numSamples = 0)
|
||||
|
||||
:param clTree: a Chow-Liu tree class
|
||||
|
||||
:param PzGe: the detector model recall: the probability of the feature detector extracting a feature from an object given it is in the scene. This is used to account for detector noise.
|
||||
|
||||
:param PzGNe: the detector model precision: the probability of the feature detector falsely extracting a feature representing an object that is not in the scene.
|
||||
|
||||
:param numSamples: the number of samples to use for the SAMPLED new place calculation
|
||||
|
||||
.. ocv:class:: FabMapLUT : public FabMap
|
||||
|
||||
The original FAB-MAP algorithm implemented as a look-up table for speed enhancements [ICRA2011]_
|
||||
|
||||
.. ocv:function:: FabMapLUT::FabMapLUT(const Mat& clTree, double PzGe, double PzGNe, int flags, int numSamples = 0, int precision = 6)
|
||||
|
||||
:param precision: the precision with which to store the pre-computed likelihoods
|
||||
|
||||
.. ocv:class:: FabMapFBO : public FabMap
|
||||
|
||||
The accelerated FAB-MAP using a 'fast bail-out' approach as in [TRO2010]_
|
||||
|
||||
.. ocv:function:: FabMapFBO::FabMapFBO(const Mat& clTree, double PzGe, double PzGNe, int flags, int numSamples = 0, double rejectionThreshold = 1e-8, double PsGd = 1e-8, int bisectionStart = 512, int bisectionIts = 9)
|
||||
|
||||
:param rejectionThreshold: images are not considered a match when the likelihood falls below the Bennett bound by the amount given by the rejectionThreshold. The threshold provides a speed/accuracy trade-off; a lower bound will be more accurate
|
||||
|
||||
:param PsGd: used to calculate the Bennett bound. Provides a speed/accuracy trade-off. A lower bound will be more accurate
|
||||
|
||||
:param bisectionStart: Used to estimate the bound using the bisection method. Must be larger than the largest expected difference between maximum and minimum image likelihoods
|
||||
|
||||
:param bisectionIts: The number of iterations for which to perform the bisection method
|
||||
|
||||
|
||||
.. ocv:class:: FabMap2 : public FabMap
|
||||
|
||||
The inverted index FAB-MAP as in [IJRR2010]_. This version of FAB-MAP is the fastest without any loss of accuracy.
|
||||
|
||||
.. ocv:function:: FabMap2::FabMap2(const Mat& clTree, double PzGe, double PzGNe, int flags)
|
||||
|
||||
.. [IJRR2008] M. Cummins and P. Newman, "FAB-MAP: Probabilistic Localization and Mapping in the Space of Appearance," The International Journal of Robotics Research, vol. 27(6), pp. 647-665, 2008
|
||||
|
||||
.. [TRO2010] M. Cummins and P. Newman, "Accelerating FAB-MAP with concentration inequalities," IEEE Transactions on Robotics, vol. 26(6), pp. 1042-1050, 2010
|
||||
|
||||
.. [IJRR2010] M. Cummins and P. Newman, "Appearance-only SLAM at large scale with FAB-MAP 2.0," The International Journal of Robotics Research, vol. 30(9), pp. 1100-1123, 2010
|
||||
|
||||
.. [ICRA2011] A. Glover, et al., "OpenFABMAP: An Open Source Toolbox for Appearance-based Loop Closure Detection," in IEEE International Conference on Robotics and Automation, St Paul, Minnesota, 2011
|
||||
|
||||
of2::IMatch
|
||||
--------------------
|
||||
|
||||
.. ocv:struct:: of2::IMatch
|
||||
|
||||
FAB-MAP comparison results are stored in a vector of IMatch structs. Each IMatch structure provides the index of the provided query bag-of-words, the index of the test bag-of-words, the raw log-likelihood of the match (independent of other comparisons), and the match probability (normalised over other comparison likelihoods).
|
||||
|
||||
::
|
||||
|
||||
struct IMatch {
|
||||
|
||||
IMatch() :
|
||||
queryIdx(-1), imgIdx(-1), likelihood(-DBL_MAX), match(-DBL_MAX) {
|
||||
}
|
||||
IMatch(int _queryIdx, int _imgIdx, double _likelihood, double _match) :
|
||||
queryIdx(_queryIdx), imgIdx(_imgIdx), likelihood(_likelihood), match(
|
||||
_match) {
|
||||
}
|
||||
|
||||
int queryIdx; //query index
|
||||
int imgIdx; //test index
|
||||
|
||||
double likelihood; //raw loglikelihood
|
||||
double match; //normalised probability
|
||||
|
||||
bool operator<(const IMatch& m) const {
|
||||
return match < m.match;
|
||||
}
|
||||
|
||||
};
|
||||
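Since ``operator<`` orders matches by probability, picking the best hypothesis from a ``matches`` vector filled by one of the compare calls is a one-liner. A sketch (per the defaults above, the new-place hypothesis carries ``imgIdx == -1``): ::

    std::vector<of2::IMatch>::const_iterator best =
        std::max_element(matches.begin(), matches.end());
    if (best->imgIdx < 0)
        std::cout << "new place, p = " << best->match << std::endl;
    else
        std::cout << "loop closure with image " << best->imgIdx
                  << ", p = " << best->match << std::endl;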
|
||||
of2::ChowLiuTree
|
||||
--------------------
|
||||
|
||||
.. ocv:class:: of2::ChowLiuTree
|
||||
|
||||
The Chow-Liu tree is a probabilistic model of the environment in terms of feature occurrence and co-occurrence. The Chow-Liu tree is a form of Bayesian network, and FAB-MAP uses the model when calculating bag-of-words similarity by taking into account feature saliency. Training data is provided to the ChowLiuTree class in the form of bag-of-words image descriptors. The make function produces a cv::Mat that encodes the tree structure.
|
||||
|
||||
.. ocv:function:: of2::ChowLiuTree::ChowLiuTree()
|
||||
|
||||
.. ocv:function:: void of2::ChowLiuTree::add(const Mat& imgDescriptor)
|
||||
|
||||
:param imgDescriptor: bag-of-words image descriptors stored as rows in a Mat
|
||||
|
||||
.. ocv:function:: void of2::ChowLiuTree::add(const vector<Mat>& imgDescriptors)
|
||||
|
||||
:param imgDescriptors: a vector containing multiple bag-of-words image descriptors
|
||||
|
||||
.. ocv:function:: const vector<Mat>& of2::ChowLiuTree::getImgDescriptors() const
|
||||
|
||||
Returns a vector containing multiple bag-of-words image descriptors
|
||||
|
||||
.. ocv:function:: Mat of2::ChowLiuTree::make(double infoThreshold = 0.0)
|
||||
|
||||
:param infoThreshold: a threshold can be set to reduce the amount of memory used when making the Chow-Liu tree, which can be an issue with large vocabulary sizes. This function can fail if the threshold is set too high. If memory is an issue, the value must be set by trial and error (~0.0005)
|
||||
|
||||
|
||||
of2::BOWMSCTrainer
|
||||
--------------------
|
||||
|
||||
.. ocv:class:: of2::BOWMSCTrainer : public of2::BOWTrainer
|
||||
|
||||
BOWMSCTrainer is a custom clustering algorithm used to produce the feature vocabulary required to create bag-of-words representations. The algorithm is an implementation of [AVC2007]_. Arguments against using k-means for the FAB-MAP algorithm are discussed in [IJRR2010]_. BOWMSCTrainer inherits from the cv::BOWTrainer class, overriding the cluster function.
|
||||
|
||||
.. ocv:function:: of2::BOWMSCTrainer::BOWMSCTrainer(double clusterSize = 0.4)
|
||||
|
||||
:param clusterSize: the specificity of the vocabulary produced. A smaller cluster size will yield a larger vocabulary.
|
||||
|
||||
.. ocv:function:: virtual Mat of2::BOWMSCTrainer::cluster() const
|
||||
|
||||
Cluster using features added to the class
|
||||
|
||||
.. ocv:function:: virtual Mat of2::BOWMSCTrainer::cluster(const Mat& descriptors) const
|
||||
|
||||
:param descriptors: feature descriptors provided as rows of the Mat.
|
||||
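A typical vocabulary-building sketch (``descriptors`` holds e.g. SURF descriptors gathered from the training images, one per row; the cluster size shown is the default): ::

    of2::BOWMSCTrainer trainer(0.4);
    trainer.add(descriptors);               // rows = feature descriptors
    cv::Mat vocabulary = trainer.cluster(); // rows = cluster centres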
|
||||
.. [AVC2007] Alexandra Teynor and Hans Burkhardt, "Fast Codebook Generation by Sequential Data Analysis for Object Classification", in Advances in Visual Computing, pp. 610-620, 2007
|
@ -1,115 +0,0 @@
|
||||
Stereo Correspondence
|
||||
========================================
|
||||
|
||||
.. highlight:: cpp
|
||||
|
||||
StereoVar
|
||||
----------
|
||||
|
||||
.. ocv:class:: StereoVar
|
||||
|
||||
Class for computing stereo correspondence using the variational matching algorithm ::
|
||||
|
||||
class StereoVar
|
||||
{
|
||||
StereoVar();
|
||||
StereoVar( int levels, double pyrScale,
|
||||
int nIt, int minDisp, int maxDisp,
|
||||
int poly_n, double poly_sigma, float fi,
|
||||
float lambda, int penalization, int cycle,
|
||||
int flags);
|
||||
virtual ~StereoVar();
|
||||
|
||||
virtual void operator()(InputArray left, InputArray right, OutputArray disp);
|
||||
|
||||
int levels;
|
||||
double pyrScale;
|
||||
int nIt;
|
||||
int minDisp;
|
||||
int maxDisp;
|
||||
int poly_n;
|
||||
double poly_sigma;
|
||||
float fi;
|
||||
float lambda;
|
||||
int penalization;
|
||||
int cycle;
|
||||
int flags;
|
||||
|
||||
...
|
||||
};
|
||||
|
||||
The class implements the modified S. G. Kosov algorithm [Publication] that differs from the original one as follows:
|
||||
|
||||
* The automatic initialization of method's parameters is added.
|
||||
|
||||
* The method of Smart Iteration Distribution (SID) is implemented.
|
||||
|
||||
* The support of Multi-Level Adaptation Technique (MLAT) is not included.
|
||||
|
||||
* The method of dynamic adaptation of method's parameters is not included.
|
||||
|
||||
StereoVar::StereoVar
|
||||
--------------------------
|
||||
|
||||
.. ocv:function:: StereoVar::StereoVar()
|
||||
|
||||
.. ocv:function:: StereoVar::StereoVar( int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags )
|
||||
|
||||
The constructor
|
||||
|
||||
:param levels: The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used. This parameter is ignored if flag USE_AUTO_PARAMS is set.
|
||||
|
||||
:param pyrScale: Specifies the image scale (<1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is half the size of the previous one. (This parameter is ignored if flag USE_AUTO_PARAMS is set.)
|
||||
|
||||
:param nIt: The number of iterations the algorithm does at each pyramid level. (If the flag USE_SMART_ID is set, the number of iterations will be redistributed so that more iterations are done on coarser levels.)
|
||||
|
||||
:param minDisp: Minimum possible disparity value. Could be negative if the left and right input images are swapped.
|
||||
|
||||
:param maxDisp: Maximum possible disparity value.
|
||||
|
||||
:param poly_n: Size of the pixel neighbourhood used to find polynomial expansion in each pixel. Larger values mean that the image will be approximated with smoother surfaces, yielding a more robust algorithm and a more blurred motion field. Typically, poly_n = 3, 5 or 7
|
||||
|
||||
:param poly_sigma: Standard deviation of the Gaussian used to smooth the derivatives that serve as a basis for the polynomial expansion. For poly_n=5 you can set poly_sigma=1.1; for poly_n=7 a good value would be poly_sigma=1.5
|
||||
|
||||
:param fi: The smoothness parameter, or the weight coefficient for the smoothness term.
|
||||
|
||||
:param lambda: The threshold parameter for edge-preserving smoothness. (This parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK is used.)
|
||||
|
||||
:param penalization: Possible values: PENALIZATION_TICHONOV - linear smoothness; PENALIZATION_CHARBONNIER - non-linear edge preserving smoothness; PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
|
||||
|
||||
:param cycle: Type of the multigrid cycle. Possible values: CYCLE_O and CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if flag USE_AUTO_PARAMS is set).
|
||||
|
||||
:param flags: The operation flags; can be a combination of the following:
|
||||
|
||||
* USE_INITIAL_DISPARITY: Use the input flow as the initial flow approximation.
|
||||
|
||||
* USE_EQUALIZE_HIST: Use the histogram equalization in the pre-processing phase.
|
||||
|
||||
* USE_SMART_ID: Use the smart iteration distribution (SID).
|
||||
|
||||
* USE_AUTO_PARAMS: Allow the method to initialize the main parameters.
|
||||
|
||||
* USE_MEDIAN_FILTERING: Use median filtering of the solution in the post-processing phase.
|
||||
|
||||
The first constructor initializes ``StereoVar`` with all the default parameters, so you only have to set ``StereoVar::maxDisp`` and/or ``StereoVar::minDisp`` at minimum. The second constructor enables you to set each parameter to a custom value.
|
||||
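A minimal sketch of the default-constructed use (the disparity range is illustrative; ``left`` and ``right`` must already be rectified): ::

    StereoVar var;          // default parameters, tuned automatically
    var.minDisp = -64;      // example disparity search range
    var.maxDisp = 0;
    Mat disp;
    var(left, right, disp); // 8-bit signed disparity map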
|
||||
|
||||
|
||||
StereoVar::operator ()
|
||||
-----------------------
|
||||
|
||||
.. ocv:function:: void StereoVar::operator()( const Mat& left, const Mat& right, Mat& disp )
|
||||
|
||||
Computes disparity using the variational algorithm for a rectified stereo pair.
|
||||
|
||||
:param left: Left 8-bit single-channel or 3-channel image.
|
||||
|
||||
:param right: Right image of the same size and the same type as the left one.
|
||||
|
||||
:param disp: Output disparity map. It is an 8-bit signed single-channel image of the same size as the input images.
|
||||
|
||||
The method executes the variational algorithm on a rectified stereo pair. See the ``stereo_match.cpp`` OpenCV sample for an example of how to prepare the images and call the method.
|
||||
|
||||
**Note**:
|
||||
|
||||
The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously.
|
@ -1,638 +0,0 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __OPENCV_CONTRIB_HPP__
|
||||
#define __OPENCV_CONTRIB_HPP__
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/features2d.hpp"
|
||||
#include "opencv2/objdetect.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
class CV_EXPORTS Octree
|
||||
{
|
||||
public:
|
||||
struct Node
|
||||
{
|
||||
Node() { memset(this, 0, sizeof(Node)); }
|
||||
int begin, end;
|
||||
float x_min, x_max, y_min, y_max, z_min, z_max;
|
||||
int maxLevels;
|
||||
bool isLeaf;
|
||||
int children[8];
|
||||
};
|
||||
|
||||
Octree();
|
||||
Octree( const std::vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
|
||||
virtual ~Octree();
|
||||
|
||||
virtual void buildTree( const std::vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
|
||||
virtual void getPointsWithinSphere( const Point3f& center, float radius,
|
||||
std::vector<Point3f>& points ) const;
|
||||
const std::vector<Node>& getNodes() const { return nodes; }
|
||||
private:
|
||||
int minPoints;
|
||||
std::vector<Point3f> points;
|
||||
std::vector<Node> nodes;
|
||||
|
||||
virtual void buildNext(size_t node_ind);
|
||||
};
|
||||
|
||||
|
||||
class CV_EXPORTS Mesh3D
|
||||
{
|
||||
public:
|
||||
struct EmptyMeshException {};
|
||||
|
||||
Mesh3D();
|
||||
Mesh3D(const std::vector<Point3f>& vtx);
|
||||
~Mesh3D();
|
||||
|
||||
void buildOctree();
|
||||
void clearOctree();
|
||||
float estimateResolution(float tryRatio = 0.1f);
|
||||
void computeNormals(float normalRadius, int minNeighbors = 20);
|
||||
void computeNormals(const std::vector<int>& subset, float normalRadius, int minNeighbors = 20);
|
||||
|
||||
void writeAsVrml(const String& file, const std::vector<Scalar>& colors = std::vector<Scalar>()) const;
|
||||
|
||||
std::vector<Point3f> vtx;
|
||||
std::vector<Point3f> normals;
|
||||
float resolution;
|
||||
Octree octree;
|
||||
|
||||
const static Point3f allzero;
|
||||
};
|
||||
|
||||
class CV_EXPORTS SpinImageModel
|
||||
{
|
||||
public:
|
||||
|
||||
/* model parameters, leave unset for default or auto estimate */
|
||||
float normalRadius;
|
||||
int minNeighbors;
|
||||
|
||||
float binSize;
|
||||
int imageWidth;
|
||||
|
||||
float lambda;
|
||||
float gamma;
|
||||
|
||||
float T_GeometriccConsistency;
|
||||
float T_GroupingCorespondances;
|
||||
|
||||
/* public interface */
|
||||
SpinImageModel();
|
||||
explicit SpinImageModel(const Mesh3D& mesh);
|
||||
~SpinImageModel();
|
||||
|
||||
void selectRandomSubset(float ratio);
|
||||
void setSubset(const std::vector<int>& subset);
|
||||
void compute();
|
||||
|
||||
void match(const SpinImageModel& scene, std::vector< std::vector<Vec2i> >& result);
|
||||
|
||||
Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const;
|
||||
|
||||
size_t getSpinCount() const { return spinImages.rows; }
|
||||
Mat getSpinImage(size_t index) const { return spinImages.row((int)index); }
|
||||
const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; }
|
||||
const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; }
|
||||
|
||||
const Mesh3D& getMesh() const { return mesh; }
|
||||
Mesh3D& getMesh() { return mesh; }
|
||||
|
||||
/* static utility functions */
|
||||
static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result);
|
||||
|
||||
static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal);
|
||||
|
||||
static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1,
|
||||
const Point3f& pointModel1, const Point3f& normalModel1,
|
||||
const Point3f& pointScene2, const Point3f& normalScene2,
|
||||
const Point3f& pointModel2, const Point3f& normalModel2);
|
||||
|
||||
static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1,
|
||||
const Point3f& pointModel1, const Point3f& normalModel1,
|
||||
const Point3f& pointScene2, const Point3f& normalScene2,
|
||||
const Point3f& pointModel2, const Point3f& normalModel2,
|
||||
float gamma);
|
||||
protected:
|
||||
void defaultParams();
|
||||
|
||||
void matchSpinToModel(const Mat& spin, std::vector<int>& indeces,
|
||||
std::vector<float>& corrCoeffs, bool useExtremeOutliers = true) const;
|
||||
|
||||
void repackSpinImages(const std::vector<uchar>& mask, Mat& spinImages, bool reAlloc = true) const;
|
||||
|
||||
std::vector<int> subset;
|
||||
Mesh3D mesh;
|
||||
Mat spinImages;
|
||||
};
|
||||
|
||||
class CV_EXPORTS TickMeter
|
||||
{
|
||||
public:
|
||||
TickMeter();
|
||||
void start();
|
||||
void stop();
|
||||
|
||||
int64 getTimeTicks() const;
|
||||
double getTimeMicro() const;
|
||||
double getTimeMilli() const;
|
||||
double getTimeSec() const;
|
||||
int64 getCounter() const;
|
||||
|
||||
void reset();
|
||||
private:
|
||||
int64 counter;
|
||||
int64 sumTime;
|
||||
int64 startTime;
|
||||
};
|
||||
|
||||
//CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm);
|
||||
|
||||
class CV_EXPORTS SelfSimDescriptor
|
||||
{
|
||||
public:
|
||||
SelfSimDescriptor();
|
||||
SelfSimDescriptor(int _ssize, int _lsize,
|
||||
int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET,
|
||||
int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS,
|
||||
int _nangles=DEFAULT_NUM_ANGLES);
|
||||
SelfSimDescriptor(const SelfSimDescriptor& ss);
|
||||
virtual ~SelfSimDescriptor();
|
||||
SelfSimDescriptor& operator = (const SelfSimDescriptor& ss);
|
||||
|
||||
size_t getDescriptorSize() const;
|
||||
Size getGridSize( Size imgsize, Size winStride ) const;
|
||||
|
||||
virtual void compute(const Mat& img, std::vector<float>& descriptors, Size winStride=Size(),
|
||||
const std::vector<Point>& locations=std::vector<Point>()) const;
|
||||
virtual void computeLogPolarMapping(Mat& mappingMask) const;
|
||||
virtual void SSD(const Mat& img, Point pt, Mat& ssd) const;
|
||||
|
||||
int smallSize;
|
||||
int largeSize;
|
||||
int startDistanceBucket;
|
||||
int numberOfDistanceBuckets;
|
||||
int numberOfAngles;
|
||||
|
||||
enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41,
|
||||
DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3,
|
||||
DEFAULT_NUM_DISTANCE_BUCKETS = 7 };
|
||||
};
|
||||
|
||||
|
||||
CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ,
|
||||
CV_OUT std::vector<std::vector<Point> >& results, CV_OUT std::vector<float>& cost,
|
||||
double templScale=1, int maxMatches = 20,
|
||||
double minMatchDistance = 1.0, int padX = 3,
|
||||
int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
|
||||
double orientationWeight = 0.5, double truncate = 20);
|
||||
|
||||
|
||||
class CV_EXPORTS_W StereoVar
|
||||
{
|
||||
public:
|
||||
// Flags
|
||||
enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16};
|
||||
enum {CYCLE_O, CYCLE_V};
|
||||
enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK};
|
||||
|
||||
//! the default constructor
|
||||
CV_WRAP StereoVar();
|
||||
|
||||
//! the full constructor taking all the necessary algorithm parameters
|
||||
CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags);
|
||||
|
||||
//! the destructor
|
||||
virtual ~StereoVar();
|
||||
|
||||
//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
|
||||
CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp);
|
||||
|
||||
CV_PROP_RW int levels;
|
||||
CV_PROP_RW double pyrScale;
|
||||
CV_PROP_RW int nIt;
|
||||
CV_PROP_RW int minDisp;
|
||||
CV_PROP_RW int maxDisp;
|
||||
CV_PROP_RW int poly_n;
|
||||
CV_PROP_RW double poly_sigma;
|
||||
CV_PROP_RW float fi;
|
||||
CV_PROP_RW float lambda;
|
||||
CV_PROP_RW int penalization;
|
||||
CV_PROP_RW int cycle;
|
||||
CV_PROP_RW int flags;
|
||||
|
||||
private:
|
||||
void autoParams();
|
||||
void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
|
||||
void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
|
||||
void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
|
||||
};
|
||||
|
||||
CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order);
|
||||
|
||||
class CV_EXPORTS Directory
|
||||
{
|
||||
public:
|
||||
static std::vector<String> GetListFiles ( const String& path, const String & exten = "*", bool addPath = true );
|
||||
static std::vector<String> GetListFilesR ( const String& path, const String & exten = "*", bool addPath = true );
|
||||
static std::vector<String> GetListFolders( const String& path, const String & exten = "*", bool addPath = true );
|
||||
};
|
||||
|
||||
/*
|
||||
* Generation of a set of different colors in the following way:
|
||||
* 1) generate more colors than needed ("factor" times more) in RGB,
|
||||
* 2) convert them to Lab,
|
||||
* 3) choose the needed number of colors from the set that are most different from
|
||||
* each other,
|
||||
* 4) convert the colors back to RGB
|
||||
*/
|
||||
CV_EXPORTS void generateColors( std::vector<Scalar>& colors, size_t count, size_t factor=100 );
|
||||
|
||||
|
||||
/*
|
||||
* Estimate the rigid body motion from frame0 to frame1. The method is based on the paper
|
||||
* "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011.
|
||||
*/
|
||||
enum { ROTATION = 1,
|
||||
TRANSLATION = 2,
|
||||
RIGID_BODY_MOTION = 4
|
||||
};
|
||||
CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt,
|
||||
const Mat& image0, const Mat& depth0, const Mat& mask0,
|
||||
const Mat& image1, const Mat& depth1, const Mat& mask1,
|
||||
const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f,
|
||||
const std::vector<int>& iterCounts=std::vector<int>(),
|
||||
const std::vector<float>& minGradientMagnitudes=std::vector<float>(),
|
||||
int transformType=RIGID_BODY_MOTION );
|
||||
|
||||
/**
|
||||
*Bilinear interpolation technique.
|
||||
*
|
||||
*The value of a desired cortical pixel is obtained through a bilinear interpolation of the values
|
||||
*of the four nearest neighbouring Cartesian pixels to the center of the RF.
|
||||
*The same principle is applied to the inverse transformation.
|
||||
*
|
||||
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
|
||||
*/
|
||||
class CV_EXPORTS LogPolar_Interp
|
||||
{
|
||||
public:
|
||||
|
||||
LogPolar_Interp() {}
|
||||
|
||||
/**
|
||||
*Constructor
|
||||
*\param w the width of the input image
|
||||
*\param h the height of the input image
|
||||
*\param center the transformation center: where the output precision is maximal
|
||||
*\param R the number of rings of the cortical image (default value 70 pixel)
|
||||
*\param ro0 the radius of the blind spot (default value 3 pixel)
|
||||
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
||||
* \a 0 means that the retinal image is computed within the inscribed circle.
|
||||
*\param S the number of sectors of the cortical image (default value 70 pixel).
|
||||
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
|
||||
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
|
||||
* \a 0 means that the parameter \a S is provided by the user.
|
||||
*/
|
||||
LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0,
|
||||
int interp=INTER_LINEAR, int full=1, int S=117, int sp=1);
|
||||
/**
|
||||
*Transformation from Cartesian image to cortical (log-polar) image.
|
||||
*\param source the Cartesian image
|
||||
*\return the transformed image (cortical image)
|
||||
*/
|
||||
const Mat to_cortical(const Mat &source);
|
||||
/**
|
||||
*Transformation from cortical image to retinal (inverse log-polar) image.
|
||||
*\param source the cortical image
|
||||
*\return the transformed image (retinal image)
|
||||
*/
|
||||
const Mat to_cartesian(const Mat &source);
|
||||
/**
|
||||
*Destructor
|
||||
*/
|
||||
~LogPolar_Interp();
|
||||
|
||||
protected:
|
||||
|
||||
Mat Rsri;
|
||||
Mat Csri;
|
||||
|
||||
int S, R, M, N;
|
||||
int top, bottom,left,right;
|
||||
double ro0, romax, a, q;
|
||||
int interp;
|
||||
|
||||
Mat ETAyx;
|
||||
Mat CSIyx;
|
||||
|
||||
void create_map(int M, int N, int R, int S, double ro0);
|
||||
};
|
||||
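/* Usage sketch (illustrative only, not part of the original header):
 *   LogPolar_Interp lp(img.cols, img.rows, Point2i(img.cols/2, img.rows/2));
 *   Mat cortical = lp.to_cortical(img);       // Cartesian -> log-polar
 *   Mat retinal  = lp.to_cartesian(cortical); // and back again
 */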
|
||||
/**
|
||||
*Overlapping circular receptive fields technique
|
||||
*
|
||||
*The Cartesian plane is divided in two regions: the fovea and the periphery.
|
||||
*The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in
|
||||
*the periphery we use the overlapping Gaussian circular RFs.
|
||||
*
|
||||
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
|
||||
*/
|
||||
class CV_EXPORTS LogPolar_Overlapping
|
||||
{
|
||||
public:
|
||||
LogPolar_Overlapping() {}
|
||||
|
||||
/**
|
||||
*Constructor
|
||||
*\param w the width of the input image
|
||||
*\param h the height of the input image
|
||||
*\param center the transformation center: where the output precision is maximal
|
||||
*\param R the number of rings of the cortical image (default value 70 pixel)
|
||||
*\param ro0 the radius of the blind spot (default value 3 pixel)
|
||||
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
||||
* \a 0 means that the retinal image is computed within the inscribed circle.
|
||||
*\param S the number of sectors of the cortical image (default value 70 pixel).
|
||||
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
|
||||
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
|
||||
* \a 0 means that the parameter \a S is provided by the user.
|
||||
*/
|
||||
LogPolar_Overlapping(int w, int h, Point2i center, int R=70,
|
||||
double ro0=3.0, int full=1, int S=117, int sp=1);
|
||||
/**
|
||||
*Transformation from Cartesian image to cortical (log-polar) image.
|
||||
*\param source the Cartesian image
|
||||
*\return the transformed image (cortical image)
|
||||
*/
|
||||
const Mat to_cortical(const Mat &source);
|
||||
/**
|
||||
*Transformation from cortical image to retinal (inverse log-polar) image.
|
||||
*\param source the cortical image
|
||||
*\return the transformed image (retinal image)
|
||||
*/
|
||||
const Mat to_cartesian(const Mat &source);
|
||||
/**
|
||||
*Destructor
|
||||
*/
|
||||
~LogPolar_Overlapping();
|
||||
|
||||
protected:
|
||||
|
||||
Mat Rsri;
|
||||
Mat Csri;
|
||||
std::vector<int> Rsr;
|
||||
std::vector<int> Csr;
|
||||
std::vector<double> Wsr;
|
||||
|
||||
int S, R, M, N, ind1;
|
||||
int top, bottom,left,right;
|
||||
double ro0, romax, a, q;
|
||||
|
||||
struct kernel
|
||||
{
|
||||
kernel() { w = 0; }
|
||||
std::vector<double> weights;
|
||||
int w;
|
||||
};
|
||||
|
||||
Mat ETAyx;
|
||||
Mat CSIyx;
|
||||
std::vector<kernel> w_ker_2D;
|
||||
|
||||
void create_map(int M, int N, int R, int S, double ro0);
|
||||
};
|
||||
|
||||
/**
|
||||
* Adjacent receptive fields technique
|
||||
*
|
||||
*All the Cartesian pixels whose coordinates in the cortical domain share the same integer part are assigned to the same RF.
|
||||
*The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF.
|
||||
*This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements
|
||||
*to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370
|
||||
*
|
||||
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
|
||||
*/
|
||||
class CV_EXPORTS LogPolar_Adjacent
|
||||
{
|
||||
public:
|
||||
LogPolar_Adjacent() {}
|
||||
|
||||
/**
|
||||
*Constructor
|
||||
*\param w the width of the input image
|
||||
*\param h the height of the input image
|
||||
*\param center the transformation center: where the output precision is maximal
|
||||
*\param R the number of rings of the cortical image (default value 70 pixel)
|
||||
*\param ro0 the radius of the blind spot (default value 3 pixel)
|
||||
*\param smin the size of the subpixel (default value 0.25 pixel)
|
||||
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
||||
* \a 0 means that the retinal image is computed within the inscribed circle.
|
||||
*\param S the number of sectors of the cortical image (default value 70 pixel).
|
||||
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
|
||||
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
|
||||
* \a 0 means that the parameter \a S is provided by the user.
|
||||
*/
|
||||
LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1);
|
||||
/**
|
||||
*Transformation from Cartesian image to cortical (log-polar) image.
|
||||
*\param source the Cartesian image
|
||||
*\return the transformed image (cortical image)
|
||||
*/
|
||||
const Mat to_cortical(const Mat &source);
|
||||
/**
|
||||
*Transformation from cortical image to retinal (inverse log-polar) image.
|
||||
*\param source the cortical image
|
||||
*\return the transformed image (retinal image)
|
||||
*/
|
||||
const Mat to_cartesian(const Mat &source);
|
||||
/**
|
||||
*Destructor
|
||||
*/
|
||||
~LogPolar_Adjacent();
|
||||
|
||||
protected:
|
||||
struct pixel
|
||||
{
|
||||
pixel() { u = v = 0; a = 0.; }
|
||||
int u;
|
||||
int v;
|
||||
double a;
|
||||
};
|
||||
int S, R, M, N;
|
||||
int top, bottom,left,right;
|
||||
double ro0, romax, a, q;
|
||||
std::vector<std::vector<pixel> > L;
|
||||
std::vector<double> A;
|
||||
|
||||
void subdivide_recursively(double x, double y, int i, int j, double length, double smin);
|
||||
bool get_uv(double x, double y, int&u, int&v);
|
||||
void create_map(int M, int N, int R, int S, double ro0, double smin);
|
||||
};
|
||||
|
||||
CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
|
||||
CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
|
||||
|
||||
class CV_EXPORTS LDA
|
||||
{
|
||||
public:
|
||||
// Initializes an LDA with num_components (default 0) and specifies how
|
||||
// samples are aligned (default dataAsRow=true).
|
||||
LDA(int num_components = 0) :
|
||||
_num_components(num_components) { }
|
||||
|
||||
// Initializes and performs a Discriminant Analysis with Fisher's
|
||||
// Optimization Criterion on given data in src and corresponding labels
|
||||
// in labels. If 0 (or fewer) components are given, the number is
|
||||
// automatically determined from the given data during computation.
|
||||
LDA(InputArrayOfArrays src, InputArray labels,
|
||||
int num_components = 0) :
|
||||
_num_components(num_components)
|
||||
{
|
||||
this->compute(src, labels); //! compute eigenvectors and eigenvalues
|
||||
}
|
||||
|
||||
// Serializes this object to a given filename.
|
||||
void save(const String& filename) const;
|
||||
|
||||
// Deserializes this object from a given filename.
|
||||
void load(const String& filename);
|
||||
|
||||
// Serializes this object to a given cv::FileStorage.
|
||||
void save(FileStorage& fs) const;
|
||||
|
||||
// Deserializes this object from a given cv::FileStorage.
|
||||
void load(const FileStorage& node);
|
||||
|
||||
// Destructor.
|
||||
~LDA() {}
|
||||
|
||||
//! Compute the discriminants for data in src and labels.
|
||||
void compute(InputArrayOfArrays src, InputArray labels);
|
||||
|
||||
// Projects samples into the LDA subspace.
|
||||
Mat project(InputArray src);
|
||||
|
||||
// Reconstructs projections from the LDA subspace.
|
||||
Mat reconstruct(InputArray src);
|
||||
|
||||
// Returns the eigenvectors of this LDA.
|
||||
Mat eigenvectors() const { return _eigenvectors; }
|
||||
|
||||
// Returns the eigenvalues of this LDA.
|
||||
Mat eigenvalues() const { return _eigenvalues; }
|
||||
|
||||
protected:
|
||||
bool _dataAsRow;
|
||||
int _num_components;
|
||||
Mat _eigenvectors;
|
||||
Mat _eigenvalues;
|
||||
|
||||
void lda(InputArrayOfArrays src, InputArray labels);
|
||||
};
|
||||
|
||||
class CV_EXPORTS_W FaceRecognizer : public Algorithm
|
||||
{
|
||||
public:
|
||||
//! virtual destructor
|
||||
virtual ~FaceRecognizer() {}
|
||||
|
||||
// Trains a FaceRecognizer.
|
||||
CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0;
|
||||
|
||||
// Updates a FaceRecognizer.
|
||||
CV_WRAP virtual void update(InputArrayOfArrays src, InputArray labels);
|
||||
|
||||
// Gets a prediction from a FaceRecognizer.
|
||||
virtual int predict(InputArray src) const = 0;
|
||||
|
||||
// Predicts the label and confidence for a given sample.
|
||||
CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0;
|
||||
|
||||
// Serializes this object to a given filename.
|
||||
CV_WRAP virtual void save(const String& filename) const;
|
||||
|
||||
// Deserializes this object from a given filename.
|
||||
CV_WRAP virtual void load(const String& filename);
|
||||
|
||||
// Serializes this object to a given cv::FileStorage.
|
||||
virtual void save(FileStorage& fs) const = 0;
|
||||
|
||||
// Deserializes this object from a given cv::FileStorage.
|
||||
virtual void load(const FileStorage& fs) = 0;
|
||||
|
||||
};
|
||||
|
||||
CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
|
||||
CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
|
||||
CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
|
||||
int grid_x=8, int grid_y=8, double threshold = DBL_MAX);
|
||||
|
||||
enum
|
||||
{
|
||||
COLORMAP_AUTUMN = 0,
|
||||
COLORMAP_BONE = 1,
|
||||
COLORMAP_JET = 2,
|
||||
COLORMAP_WINTER = 3,
|
||||
COLORMAP_RAINBOW = 4,
|
||||
COLORMAP_OCEAN = 5,
|
||||
COLORMAP_SUMMER = 6,
|
||||
COLORMAP_SPRING = 7,
|
||||
COLORMAP_COOL = 8,
|
||||
COLORMAP_HSV = 9,
|
||||
COLORMAP_PINK = 10,
|
||||
COLORMAP_HOT = 11
|
||||
};
|
||||
|
||||
CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);
|
||||
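// e.g.: applyColorMap(grayscaleImage, falseColorImage, COLORMAP_JET);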
|
||||
CV_EXPORTS bool initModule_contrib();
|
||||
}
|
||||
|
||||
#include "opencv2/contrib/openfabmap.hpp"
|
||||
|
||||
#endif
|
@ -1,384 +0,0 @@
/*M  License Agreement for Open Source Computer Vision Library.
//   Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
//   Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
//   [standard OpenCV BSD license text]
//M*/

#ifndef __OPENCV_CONTRIB_COMPAT_HPP__
#define __OPENCV_CONTRIB_COMPAT_HPP__

#include "opencv2/core/core_c.h"

#ifdef __cplusplus

/****************************************************************************************\
*                                Adaptive Skin Detector                                  *
\****************************************************************************************/

class CV_EXPORTS CvAdaptiveSkinDetector
{
private:
    enum {
        GSD_HUE_LT = 3,
        GSD_HUE_UT = 33,
        GSD_INTENSITY_LT = 15,
        GSD_INTENSITY_UT = 250
    };

    class CV_EXPORTS Histogram
    {
    private:
        enum {
            HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
        };

    protected:
        int findCoverageIndex(double surfaceToCover, int defaultValue = 0);

    public:
        CvHistogram *fHistogram;
        Histogram();
        virtual ~Histogram();

        void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
        void mergeWith(Histogram *source, double weight);
    };

    int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
    double fHistogramMergeFactor, fHuePercentCovered;
    Histogram histogramHueMotion, skinHueHistogram;
    IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
    IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;

protected:
    void initData(IplImage *src, int widthDivider, int heightDivider);
    void adaptiveFilter();

public:

    enum {
        MORPHING_METHOD_NONE = 0,
        MORPHING_METHOD_ERODE = 1,
        MORPHING_METHOD_ERODE_ERODE = 2,
        MORPHING_METHOD_ERODE_DILATE = 3
    };

    CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
    virtual ~CvAdaptiveSkinDetector();

    virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
};


/****************************************************************************************\
*                                  Fuzzy MeanShift Tracker                               *
\****************************************************************************************/

class CV_EXPORTS CvFuzzyPoint {
public:
    double x, y, value;

    CvFuzzyPoint(double _x, double _y);
};

class CV_EXPORTS CvFuzzyCurve {
private:
    std::vector<CvFuzzyPoint> points;
    double value, centre;

    bool between(double x, double x1, double x2);

public:
    CvFuzzyCurve();
    ~CvFuzzyCurve();

    void setCentre(double _centre);
    double getCentre();
    void clear();
    void addPoint(double x, double y);
    double calcValue(double param);
    double getValue();
    void setValue(double _value);
};

class CV_EXPORTS CvFuzzyFunction {
public:
    std::vector<CvFuzzyCurve> curves;

    CvFuzzyFunction();
    ~CvFuzzyFunction();
    void addCurve(CvFuzzyCurve *curve, double value = 0);
    void resetValues();
    double calcValue();
    CvFuzzyCurve *newCurve();
};

class CV_EXPORTS CvFuzzyRule {
private:
    CvFuzzyCurve *fuzzyInput1, *fuzzyInput2;
    CvFuzzyCurve *fuzzyOutput;
public:
    CvFuzzyRule();
    ~CvFuzzyRule();
    void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
    double calcValue(double param1, double param2);
    CvFuzzyCurve *getOutputCurve();
};

class CV_EXPORTS CvFuzzyController {
private:
    std::vector<CvFuzzyRule*> rules;
public:
    CvFuzzyController();
    ~CvFuzzyController();
    void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
    double calcOutput(double param1, double param2);
};

class CV_EXPORTS CvFuzzyMeanShiftTracker
{
private:
    class FuzzyResizer
    {
    private:
        CvFuzzyFunction iInput, iOutput;
        CvFuzzyController fuzzyController;
    public:
        FuzzyResizer();
        int calcOutput(double edgeDensity, double density);
    };

    class SearchWindow
    {
    public:
        FuzzyResizer *fuzzyResizer;
        int x, y;
        int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth;
        int ldx, ldy, ldw, ldh, numShifts, numIters;
        int xGc, yGc;
        long m00, m01, m10, m11, m02, m20;
        double ellipseAngle;
        double density;
        unsigned int depthLow, depthHigh;
        int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;

        SearchWindow();
        ~SearchWindow();
        void setSize(int _x, int _y, int _width, int _height);
        void initDepthValues(IplImage *maskImage, IplImage *depthMap);
        bool shift();
        void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth);
        void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
        void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
        void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
        bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
    };

public:
    enum TrackingState
    {
        tsNone = 0,
        tsSearching = 1,
        tsTracking = 2,
        tsSetWindow = 3,
        tsDisabled = 10
    };

    enum ResizeMethod {
        rmEdgeDensityLinear = 0,
        rmEdgeDensityFuzzy = 1,
        rmInnerDensity = 2
    };

    enum {
        MinKernelMass = 1000
    };

    SearchWindow kernel;
    int searchMode;

private:
    enum
    {
        MaxMeanShiftIteration = 5,
        MaxSetSizeIteration = 5
    };

    void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);

public:
    CvFuzzyMeanShiftTracker();
    ~CvFuzzyMeanShiftTracker();

    void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
};


namespace cv
{

typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);

class CV_EXPORTS LevMarqSparse {
public:
    LevMarqSparse();
    LevMarqSparse(int npoints,       // number of points
                  int ncameras,      // number of cameras
                  int nPointParams,  // number of params per one point (3 in case of 3D points)
                  int nCameraParams, // number of parameters per one camera
                  int nErrParams,    // number of parameters in the measurement vector
                                     // for 1 point at one camera (2 in case of 2D projections)
                  Mat& visibility,   // visibility matrix: rows correspond to points, columns to cameras;
                                     // 1 - point is visible for the camera, 0 - invisible
                  Mat& P0,           // starting vector of parameters, first cameras then points
                  Mat& X,            // measurements, in order of visibility; non-visible cases are skipped
                  TermCriteria criteria, // termination criteria

                  // callback for estimation of Jacobian matrices
                  void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data),
                  // callback for estimation of backprojection errors
                  void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data),
                  void* data,        // user-specific data passed to the callbacks
                  BundleAdjustCallback cb, void* user_data
                  );

    virtual ~LevMarqSparse();

    virtual void run(int npoints,       // number of points
                     int ncameras,      // number of cameras
                     int nPointParams,  // number of params per one point (3 in case of 3D points)
                     int nCameraParams, // number of parameters per one camera
                     int nErrParams,    // number of parameters in the measurement vector
                                        // for 1 point at one camera (2 in case of 2D projections)
                     Mat& visibility,   // visibility matrix: rows correspond to points, columns to cameras;
                                        // 1 - point is visible for the camera, 0 - invisible
                     Mat& P0,           // starting vector of parameters, first cameras then points
                     Mat& X,            // measurements, in order of visibility; non-visible cases are skipped
                     TermCriteria criteria, // termination criteria

                     // callback for estimation of Jacobian matrices
                     void (CV_CDECL * fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data),
                     // callback for estimation of backprojection errors
                     void (CV_CDECL * func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data),
                     void* data         // user-specific data passed to the callbacks
                     );

    virtual void clear();

    // convenience entry point for simple bundle adjustment tasks
    static void bundleAdjust(std::vector<Point3d>& points, // positions of points in the global coordinate system (input and output)
                             const std::vector<std::vector<Point2d> >& imagePoints, // projections of 3d points for every camera
                             const std::vector<std::vector<int> >& visibility, // visibility of 3d points for every camera
                             std::vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
                             std::vector<Mat>& R, // rotation matrices of all cameras (input and output)
                             std::vector<Mat>& T, // translation vectors of all cameras (input and output)
                             std::vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
                             const TermCriteria& criteria =
                                 TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON),
                             BundleAdjustCallback cb = 0, void* user_data = 0);

public:
    virtual void optimize(CvMat &_vis); // main function that runs the minimization

    // iteratively asks for measurements for visible camera-point pairs
    void ask_for_proj(CvMat &_vis, bool once = false);
    // iteratively asks for Jacobians for every camera-point pair
    void ask_for_projac(CvMat &_vis);

    CvMat* err; // error X-hX
    double prevErrNorm, errNorm;
    double lambda;
    CvTermCriteria criteria;
    int iters;

    CvMat** U;          // size of array is equal to the number of cameras
    CvMat** V;          // size of array is equal to the number of points
    CvMat** inv_V_star; // inverse of V*

    CvMat** A;
    CvMat** B;
    CvMat** W;

    CvMat* X;  // measurement
    CvMat* hX; // current measurement estimate given the new parameter vector

    CvMat* prevP; // current, already accepted parameters
    CvMat* P;     // parameters used to evaluate the function with new params;
                  // these parameters may be rejected

    CvMat* deltaP; // computed increase of parameters (result of the normal system solution)

    CvMat** ea; // sum_i AijT * e_ij, used as the right part of the normal equation;
                // length of array is j = number of cameras
    CvMat** eb; // sum_j BijT * e_ij, used as the right part of the normal equation;
                // length of array is i = number of points

    CvMat** Yj; // length of array is i = num_points

    CvMat* S; // big matrix of blocks Sjk; each block has size num_cam_params x num_cam_params

    CvMat* JtJ_diag; // diagonal of JtJ, used to back up the diagonal elements before augmentation

    CvMat* Vis_index; // matrix whose element is the index of the measurement for point i and camera j

    int num_cams;
    int num_points;
    int num_err_param;
    int num_cam_param;
    int num_point_param;

    // target function and Jacobian pointers, which need to be initialized
    void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
    void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);

    void* data;

    BundleAdjustCallback cb;
    void* user_data;
};

} // cv

#endif /* __cplusplus */

#endif /* __OPENCV_CONTRIB_COMPAT_HPP__ */
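Since the whole compatibility header above is going away, a hedged sketch of its most commonly used entry point, the static LevMarqSparse::bundleAdjust, may help migrators; the data setup below is purely illustrative:

#include "opencv2/contrib/compat.hpp"   // pre-removal path (assumption)

// Refine 3D points and per-camera intrinsics/extrinsics in place.
void refineReconstruction(std::vector<cv::Point3d>& pts3d,
                          const std::vector<std::vector<cv::Point2d> >& proj, // proj[cam][pt]
                          std::vector<cv::Mat>& K, std::vector<cv::Mat>& R,
                          std::vector<cv::Mat>& T, std::vector<cv::Mat>& dist)
{
    // Illustrative visibility matrix: every camera sees every point.
    std::vector<std::vector<int> > visibility(proj.size(),
                                              std::vector<int>(pts3d.size(), 1));
    cv::LevMarqSparse::bundleAdjust(pts3d, proj, visibility, K, R, T, dist);
}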
@ -1,48 +0,0 @@
/*M  License Agreement for Open Source Computer Vision Library.
//   Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
//   Copyright (C) 2009, Willow Garage Inc., all rights reserved.
//   Copyright (C) 2013, OpenCV Foundation, all rights reserved.
//   [standard OpenCV BSD license text]
//M*/

#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif

#include "opencv2/contrib.hpp"
@ -1,173 +0,0 @@
#pragma once

#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(__ANDROID__)

#include <opencv2/core.hpp>
#include <opencv2/objdetect.hpp>

#include <vector>

namespace cv
{
class CV_EXPORTS DetectionBasedTracker
{
public:
    struct Parameters
    {
        int maxTrackLifetime;
        int minDetectionPeriod; // the minimal time between runs of the big object detector (on the whole frame), in ms (1000 means 1 sec); default=0

        Parameters();
    };

    class IDetector
    {
    public:
        IDetector():
            minObjSize(96, 96),
            maxObjSize(INT_MAX, INT_MAX),
            minNeighbours(2),
            scaleFactor(1.1f)
        {}

        virtual void detect(const cv::Mat& image, std::vector<cv::Rect>& objects) = 0;

        void setMinObjectSize(const cv::Size& min)
        {
            minObjSize = min;
        }
        void setMaxObjectSize(const cv::Size& max)
        {
            maxObjSize = max;
        }
        cv::Size getMinObjectSize() const
        {
            return minObjSize;
        }
        cv::Size getMaxObjectSize() const
        {
            return maxObjSize;
        }
        float getScaleFactor()
        {
            return scaleFactor;
        }
        void setScaleFactor(float value)
        {
            scaleFactor = value;
        }
        int getMinNeighbours()
        {
            return minNeighbours;
        }
        void setMinNeighbours(int value)
        {
            minNeighbours = value;
        }
        virtual ~IDetector() {}

    protected:
        cv::Size minObjSize;
        cv::Size maxObjSize;
        int minNeighbours;
        float scaleFactor;
    };

    DetectionBasedTracker(cv::Ptr<IDetector> mainDetector, cv::Ptr<IDetector> trackingDetector, const Parameters& params);
    virtual ~DetectionBasedTracker();

    virtual bool run();
    virtual void stop();
    virtual void resetTracking();

    virtual void process(const cv::Mat& imageGray);

    bool setParameters(const Parameters& params);
    const Parameters& getParameters() const;


    typedef std::pair<cv::Rect, int> Object;
    virtual void getObjects(std::vector<cv::Rect>& result) const;
    virtual void getObjects(std::vector<Object>& result) const;

    enum ObjectStatus
    {
        DETECTED_NOT_SHOWN_YET,
        DETECTED,
        DETECTED_TEMPORARY_LOST,
        WRONG_OBJECT
    };
    struct ExtObject
    {
        int id;
        cv::Rect location;
        ObjectStatus status;
        ExtObject(int _id, cv::Rect _location, ObjectStatus _status)
            : id(_id), location(_location), status(_status)
        {
        }
    };
    virtual void getObjects(std::vector<ExtObject>& result) const;


    virtual int addObject(const cv::Rect& location); // returns the id of the new object

protected:
    class SeparateDetectionWork;
    cv::Ptr<SeparateDetectionWork> separateDetectionWork;
    friend void* workcycleObjectDetectorFunction(void* p);

    struct InnerParameters
    {
        int numLastPositionsToTrack;
        int numStepsToWaitBeforeFirstShow;
        int numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown;
        int numStepsToShowWithoutDetecting;

        float coeffTrackingWindowSize;
        float coeffObjectSizeToTrack;
        float coeffObjectSpeedUsingInPrediction;

        InnerParameters();
    };
    Parameters parameters;
    InnerParameters innerParameters;

    struct TrackedObject
    {
        typedef std::vector<cv::Rect> PositionsVector;

        PositionsVector lastPositions;

        int numDetectedFrames;
        int numFramesNotDetected;
        int id;

        TrackedObject(const cv::Rect& rect) : numDetectedFrames(1), numFramesNotDetected(0)
        {
            lastPositions.push_back(rect);
            id = getNextId();
        }

        static int getNextId()
        {
            static int _id = 0;
            return _id++;
        }
    };

    int numTrackedSteps;
    std::vector<TrackedObject> trackedObjects;

    std::vector<float> weightsPositionsSmoothing;
    std::vector<float> weightsSizesSmoothing;

    cv::Ptr<IDetector> cascadeForTracking;

    void updateTrackedObjects(const std::vector<cv::Rect>& detectedObjects);
    cv::Rect calcTrackedObjectPositionToShow(int i) const;
    cv::Rect calcTrackedObjectPositionToShow(int i, ObjectStatus& status) const;
    void detectInRegion(const cv::Mat& img, const cv::Rect& r, std::vector<cv::Rect>& detectedObjectsInRegions);
};
} // end of cv namespace
#endif
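A hedged usage sketch for the interface above; the CascadeDetectorAdapter wrapper is hypothetical (it is not part of this header) and assumes a cascade XML file has already been loaded:

#include <opencv2/objdetect.hpp>

// Hypothetical adapter that plugs CascadeClassifier into IDetector.
class CascadeDetectorAdapter : public cv::DetectionBasedTracker::IDetector
{
public:
    explicit CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector)
        : detector_(detector) {}

    void detect(const cv::Mat& image, std::vector<cv::Rect>& objects)
    {
        // The protected base members double as detectMultiScale parameters.
        detector_->detectMultiScale(image, objects, scaleFactor, minNeighbours,
                                    0, minObjSize, maxObjSize);
    }

private:
    cv::Ptr<cv::CascadeClassifier> detector_;
};

// Typical wiring: one adapter scans whole frames, a second one re-detects in
// the small tracked regions; then per grayscale frame:
//   tracker.process(gray); tracker.getObjects(objects);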
@ -1,219 +0,0 @@
/*M  License Agreement for Open Source Computer Vision Library.
//   Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
//   Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
//   [standard OpenCV BSD license text]
//M*/

#ifndef __OPENCV_HYBRIDTRACKER_H_
#define __OPENCV_HYBRIDTRACKER_H_

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/ml.hpp"

#ifdef __cplusplus

namespace cv
{

// Motion model for the tracking algorithm. Currently supports objects that do not move much.
// TODO: add a Kalman filter.
struct CV_EXPORTS CvMotionModel
{
    enum {LOW_PASS_FILTER = 0, KALMAN_FILTER = 1, EM = 2};

    CvMotionModel()
    {
    }

    float low_pass_gain; // low pass gain
};

// Mean Shift Tracker parameters for specifying the HSV channels used and the CamShift parameters.
struct CV_EXPORTS CvMeanShiftTrackerParams
{
    enum { H = 0, HS = 1, HSV = 2 };
    CvMeanShiftTrackerParams(int tracking_type = CvMeanShiftTrackerParams::HS,
                             CvTermCriteria term_crit = CvTermCriteria());

    int tracking_type;
    std::vector<float> h_range;
    std::vector<float> s_range;
    std::vector<float> v_range;
    CvTermCriteria term_crit;
};

// Feature tracking parameters
struct CV_EXPORTS CvFeatureTrackerParams
{
    enum { SIFT = 0, SURF = 1, OPTICAL_FLOW = 2 };
    CvFeatureTrackerParams(int featureType = 0, int windowSize = 0)
    {
        feature_type = featureType;
        window_size = windowSize;
    }

    int feature_type; // feature type to use
    int window_size;  // window size in pixels around which to search for the new window
};

// Hybrid Tracking parameters for specifying the weights of the individual trackers and the motion model.
struct CV_EXPORTS CvHybridTrackerParams
{
    CvHybridTrackerParams(float ft_tracker_weight = 0.5, float ms_tracker_weight = 0.5,
                          CvFeatureTrackerParams ft_params = CvFeatureTrackerParams(),
                          CvMeanShiftTrackerParams ms_params = CvMeanShiftTrackerParams(),
                          CvMotionModel model = CvMotionModel());

    float ft_tracker_weight;
    float ms_tracker_weight;
    CvFeatureTrackerParams ft_params;
    CvMeanShiftTrackerParams ms_params;
    int motion_model;
    float low_pass_gain;
};

// Performs CamShift using parameters from MeanShiftTrackerParams
class CV_EXPORTS CvMeanShiftTracker
{
private:
    Mat hsv, hue;
    Mat backproj;
    Mat mask, maskroi;
    MatND hist;
    Rect prev_trackwindow;
    RotatedRect prev_trackbox;
    Point2f prev_center;

public:
    CvMeanShiftTrackerParams params;

    CvMeanShiftTracker();
    explicit CvMeanShiftTracker(CvMeanShiftTrackerParams _params);
    ~CvMeanShiftTracker();
    void newTrackingWindow(Mat image, Rect selection);
    RotatedRect updateTrackingWindow(Mat image);
    Mat getHistogramProjection(int type);
    void setTrackingWindow(Rect _window);
    Rect getTrackingWindow();
    RotatedRect getTrackingEllipse();
    Point2f getTrackingCenter();
};

// Performs SIFT/SURF feature tracking using parameters from FeatureTrackerParams
class CV_EXPORTS CvFeatureTracker
{
private:
    Ptr<Feature2D> dd;
    Ptr<DescriptorMatcher> matcher;
    std::vector<DMatch> matches;

    Mat prev_image;
    Mat prev_image_bw;
    Rect prev_trackwindow;
    Point2d prev_center;

    int ittr;
    std::vector<Point2f> features[2];

public:
    Mat disp_matches;
    CvFeatureTrackerParams params;

    CvFeatureTracker();
    explicit CvFeatureTracker(CvFeatureTrackerParams params);
    ~CvFeatureTracker();
    void newTrackingWindow(Mat image, Rect selection);
    Rect updateTrackingWindow(Mat image);
    Rect updateTrackingWindowWithSIFT(Mat image);
    Rect updateTrackingWindowWithFlow(Mat image);
    void setTrackingWindow(Rect _window);
    Rect getTrackingWindow();
    Point2f getTrackingCenter();
};

// Performs hybrid tracking, combining the individual trackers using EM or filters
class CV_EXPORTS CvHybridTracker
{
private:
    CvMeanShiftTracker* mstracker;
    CvFeatureTracker* fttracker;

    CvMat* samples;
    CvMat* labels;

    Rect prev_window;
    Point2f prev_center;
    Mat prev_proj;
    RotatedRect trackbox;

    int ittr;
    Point2f curr_center;

    inline float getL2Norm(Point2f p1, Point2f p2);
    Mat getDistanceProjection(Mat image, Point2f center);
    Mat getGaussianProjection(Mat image, int ksize, double sigma, Point2f center);
    void updateTrackerWithEM(Mat image);
    void updateTrackerWithLowPassFilter(Mat image);

public:
    CvHybridTrackerParams params;
    CvHybridTracker();
    explicit CvHybridTracker(CvHybridTrackerParams params);
    ~CvHybridTracker();

    void newTracker(Mat image, Rect selection);
    void updateTracker(Mat image);
    Rect getTrackingWindow();
};

typedef CvMotionModel MotionModel;
typedef CvMeanShiftTrackerParams MeanShiftTrackerParams;
typedef CvFeatureTrackerParams FeatureTrackerParams;
typedef CvHybridTrackerParams HybridTrackerParams;
typedef CvMeanShiftTracker MeanShiftTracker;
typedef CvFeatureTracker FeatureTracker;
typedef CvHybridTracker HybridTracker;
}

#endif

#endif
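A minimal, hedged driving loop for the hybrid tracker; the include path and the video plumbing are assumptions, and error handling is omitted:

#include "opencv2/contrib/hybridtracker.hpp" // pre-removal path (assumption)
#include "opencv2/videoio.hpp"

void trackSelection(cv::VideoCapture& cap, cv::Rect selection)
{
    cv::HybridTrackerParams params;       // defaults: equal mean-shift / feature weights
    cv::HybridTracker tracker(params);

    cv::Mat frame;
    cap >> frame;
    tracker.newTracker(frame, selection); // seeds both sub-trackers

    while (cap.read(frame))
    {
        tracker.updateTracker(frame);     // fuses the two estimates
        cv::Rect window = tracker.getTrackingWindow();
        // ... draw or consume `window` here ...
    }
}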
@ -1,401 +0,0 @@
/*M  This file originates from the openFABMAP project: [http://code.google.com/p/openfabmap/]
//
//   For published work which uses all or part of OpenFABMAP, please cite:
//   [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843]
//
//   Original algorithm by Mark Cummins and Paul Newman:
//   [http://ijr.sagepub.com/content/27/6/647.short]
//   [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942]
//   [http://ijr.sagepub.com/content/30/9/1100.abstract]
//
//   Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and
//   Will Maddern [w.maddern@qut.edu.au], all rights reserved.
//   [standard OpenCV BSD license text]
//M*/

#ifndef __OPENCV_OPENFABMAP_H_
#define __OPENCV_OPENFABMAP_H_

#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"

#include <vector>
#include <list>
#include <map>
#include <set>
#include <valarray>

namespace cv {

namespace of2 {

/*
    Return data format of a FABMAP compare call
*/
struct CV_EXPORTS IMatch {

    IMatch() :
        queryIdx(-1), imgIdx(-1), likelihood(-DBL_MAX), match(-DBL_MAX) {
    }
    IMatch(int _queryIdx, int _imgIdx, double _likelihood, double _match) :
        queryIdx(_queryIdx), imgIdx(_imgIdx), likelihood(_likelihood), match(_match) {
    }

    int queryIdx; // query index
    int imgIdx;   // test index

    double likelihood; // raw log-likelihood
    double match;      // normalised probability

    bool operator<(const IMatch& m) const {
        return match < m.match;
    }

};

/*
    Base FabMap class. Each FabMap method inherits from this class.
*/
class CV_EXPORTS FabMap {
public:

    //FabMap options
    enum {
        MEAN_FIELD = 1,
        SAMPLED = 2,
        NAIVE_BAYES = 4,
        CHOW_LIU = 8,
        MOTION_MODEL = 16
    };

    FabMap(const Mat& clTree, double PzGe, double PzGNe, int flags,
           int numSamples = 0);
    virtual ~FabMap();

    //methods to add training data for the sampling method
    virtual void addTraining(const Mat& queryImgDescriptor);
    virtual void addTraining(const std::vector<Mat>& queryImgDescriptors);

    //methods to add to the test data
    virtual void add(const Mat& queryImgDescriptor);
    virtual void add(const std::vector<Mat>& queryImgDescriptors);

    //accessors
    const std::vector<Mat>& getTrainingImgDescriptors() const;
    const std::vector<Mat>& getTestImgDescriptors() const;

    //main FabMap image comparison
    void compare(const Mat& queryImgDescriptor, std::vector<IMatch>& matches,
                 bool addQuery = false, const Mat& mask = Mat());
    void compare(const Mat& queryImgDescriptor, const Mat& testImgDescriptors,
                 std::vector<IMatch>& matches, const Mat& mask = Mat());
    void compare(const Mat& queryImgDescriptor, const std::vector<Mat>& testImgDescriptors,
                 std::vector<IMatch>& matches, const Mat& mask = Mat());
    void compare(const std::vector<Mat>& queryImgDescriptors, std::vector<IMatch>& matches,
                 bool addQuery = false, const Mat& mask = Mat());
    void compare(const std::vector<Mat>& queryImgDescriptors, const std::vector<Mat>& testImgDescriptors,
                 std::vector<IMatch>& matches, const Mat& mask = Mat());

protected:

    void compareImgDescriptor(const Mat& queryImgDescriptor, int queryIndex,
                              const std::vector<Mat>& testImgDescriptors, std::vector<IMatch>& matches);

    void addImgDescriptor(const Mat& queryImgDescriptor);

    //the getLikelihoods method is overridden by each FabMap variant
    virtual void getLikelihoods(const Mat& queryImgDescriptor,
                                const std::vector<Mat>& testImgDescriptors, std::vector<IMatch>& matches);
    virtual double getNewPlaceLikelihood(const Mat& queryImgDescriptor);

    //turn likelihoods into probabilities (also add in the motion model if used)
    void normaliseDistribution(std::vector<IMatch>& matches);

    //Chow-Liu Tree
    int pq(int q);
    double Pzq(int q, bool zq);
    double PzqGzpq(int q, bool zq, bool zpq);

    //FAB-MAP Core
    double PzqGeq(bool zq, bool eq);
    double PeqGL(int q, bool Lzq, bool eq);
    double PzqGL(int q, bool zq, bool zpq, bool Lzq);
    double PzqGzpqL(int q, bool zq, bool zpq, bool Lzq);
    double (FabMap::*PzGL)(int q, bool zq, bool zpq, bool Lzq);

    //data
    Mat clTree;
    std::vector<Mat> trainingImgDescriptors;
    std::vector<Mat> testImgDescriptors;
    std::vector<IMatch> priorMatches;

    //parameters
    double PzGe;
    double PzGNe;
    double Pnew;

    double mBias;
    double sFactor;

    int flags;
    int numSamples;

};

/*
    The original FAB-MAP algorithm, developed based on:
    http://ijr.sagepub.com/content/27/6/647.short
*/
class CV_EXPORTS FabMap1: public FabMap {
public:
    FabMap1(const Mat& clTree, double PzGe, double PzGNe, int flags,
            int numSamples = 0);
    virtual ~FabMap1();
protected:

    //FabMap1 implementation of the likelihood comparison
    void getLikelihoods(const Mat& queryImgDescriptor,
                        const std::vector<Mat>& testImgDescriptors, std::vector<IMatch>& matches);
};

/*
    A computationally faster version of the original FAB-MAP algorithm.
    A look-up table is used to precompute many of the recurring calculations.
*/
class CV_EXPORTS FabMapLUT: public FabMap {
public:
    FabMapLUT(const Mat& clTree, double PzGe, double PzGNe,
              int flags, int numSamples = 0, int precision = 6);
    virtual ~FabMapLUT();
protected:

    //FabMap look-up-table implementation of the likelihood comparison
    void getLikelihoods(const Mat& queryImgDescriptor,
                        const std::vector<Mat>& testImgDescriptors, std::vector<IMatch>& matches);

    //precomputed data
    int (*table)[8];

    //data precision
    int precision;
};

/*
    The Accelerated FAB-MAP algorithm, developed based on:
    http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942
*/
class CV_EXPORTS FabMapFBO: public FabMap {
public:
    FabMapFBO(const Mat& clTree, double PzGe, double PzGNe, int flags,
              int numSamples = 0, double rejectionThreshold = 1e-8,
              double PsGd = 1e-8, int bisectionStart = 512, int bisectionIts = 9);
    virtual ~FabMapFBO();

protected:

    //FabMap Fast Bail-out implementation of the likelihood comparison
    void getLikelihoods(const Mat& queryImgDescriptor,
                        const std::vector<Mat>& testImgDescriptors, std::vector<IMatch>& matches);

    //structure used to determine the word comparison order
    struct WordStats {
        WordStats() :
            q(0), info(0), V(0), M(0) {
        }

        WordStats(int _q, double _info) :
            q(_q), info(_info), V(0), M(0) {
        }

        int q;
        double info;
        mutable double V;
        mutable double M;

        bool operator<(const WordStats& w) const {
            return info < w.info;
        }

    };

    //private functions needed for fast bail-out
    void setWordStatistics(const Mat& queryImgDescriptor, std::multiset<WordStats>& wordData);
    double limitbisection(double v, double m);
    double bennettInequality(double v, double m, double delta);
    static bool compInfo(const WordStats& first, const WordStats& second);

    //parameters
    double PsGd;
    double rejectionThreshold;
    int bisectionStart;
    int bisectionIts;
};

/*
    The FAB-MAP 2.0 algorithm, developed based on:
    http://ijr.sagepub.com/content/30/9/1100.abstract
*/
class CV_EXPORTS FabMap2: public FabMap {
public:

    FabMap2(const Mat& clTree, double PzGe, double PzGNe, int flags);
    virtual ~FabMap2();

    //FabMap2 builds the inverted index and requires additional training/test
    //add functions
    void addTraining(const Mat& queryImgDescriptors) {
        FabMap::addTraining(queryImgDescriptors);
    }
    void addTraining(const std::vector<Mat>& queryImgDescriptors);

    void add(const Mat& queryImgDescriptors) {
        FabMap::add(queryImgDescriptors);
    }
    void add(const std::vector<Mat>& queryImgDescriptors);

protected:

    //FabMap2 implementation of the likelihood comparison
    void getLikelihoods(const Mat& queryImgDescriptor,
                        const std::vector<Mat>& testImgDescriptors, std::vector<IMatch>& matches);
    double getNewPlaceLikelihood(const Mat& queryImgDescriptor);

    //the likelihood function using the inverted index
    void getIndexLikelihoods(const Mat& queryImgDescriptor, std::vector<double>& defaults,
                             std::map<int, std::vector<int> >& invertedMap, std::vector<IMatch>& matches);
    void addToIndex(const Mat& queryImgDescriptor, std::vector<double>& defaults,
                    std::map<int, std::vector<int> >& invertedMap);

    //data
    std::vector<double> d1, d2, d3, d4;
    std::vector<std::vector<int> > children;

    // TODO: inverted map a vector?

    std::vector<double> trainingDefaults;
    std::map<int, std::vector<int> > trainingInvertedMap;

    std::vector<double> testDefaults;
    std::map<int, std::vector<int> > testInvertedMap;

};

/*
    A Chow-Liu tree is required by FAB-MAP. The Chow-Liu tree provides an
    estimate of the full distribution of visual words using a minimum spanning
    tree. The tree is generated from training data.
*/
class CV_EXPORTS ChowLiuTree {
public:
    ChowLiuTree();
    virtual ~ChowLiuTree();

    //add data to the Chow-Liu tree before calling make
    void add(const Mat& imgDescriptor);
    void add(const std::vector<Mat>& imgDescriptors);

    const std::vector<Mat>& getImgDescriptors() const;

    Mat make(double infoThreshold = 0.0);

private:
    std::vector<Mat> imgDescriptors;
    Mat mergedImgDescriptors;

    typedef struct info {
        float score;
        short word1;
        short word2;
    } info;

    //probabilities extracted from mergedImgDescriptors
    double P(int a, bool za);
    double JP(int a, bool za, int b, bool zb); // a & b
    double CP(int a, bool za, int b, bool zb); // a | b

    //calculating the mutual information of all edges
    void createBaseEdges(std::list<info>& edges, double infoThreshold);
    double calcMutInfo(int word1, int word2);
    static bool sortInfoScores(const info& first, const info& second);

    //selecting the minimum spanning edges with maximum information
    bool reduceEdgesToMinSpan(std::list<info>& edges);

    //building the tree structure
    Mat buildTree(int root_word, std::list<info> &edges);
    void recAddToTree(Mat &cltree, int q, int pq, std::list<info> &remaining_edges);
    std::vector<int> extractChildren(std::list<info> &remaining_edges, int q);

};

/*
    A custom vocabulary training method based on:
    http://www.springerlink.com/content/d1h6j8x552532003/
*/
class CV_EXPORTS BOWMSCTrainer: public BOWTrainer {
public:
    BOWMSCTrainer(double clusterSize = 0.4);
    virtual ~BOWMSCTrainer();

    // Returns the trained vocabulary (i.e. cluster centres).
    virtual Mat cluster() const;
    virtual Mat cluster(const Mat& descriptors) const;

protected:

    double clusterSize;

};

}

}

#endif /* __OPENCV_OPENFABMAP_H_ */
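To tie the pieces above together, a hedged end-to-end sketch: `trainData` and `testData` stand for bag-of-words image descriptors (one row vector per image) produced elsewhere, and the parameter values are illustrative only:

#include "opencv2/contrib/openfabmap.hpp"

void runFabMap(const std::vector<cv::Mat>& trainData,
               const std::vector<cv::Mat>& testData)
{
    // 1. Learn a Chow-Liu tree over the visual vocabulary.
    cv::of2::ChowLiuTree treeBuilder;
    treeBuilder.add(trainData);
    cv::Mat clTree = treeBuilder.make(0.0);

    // 2. FAB-MAP 2.0 with illustrative detector likelihoods PzGe / PzGNe.
    cv::of2::FabMap2 fabmap(clTree, 0.39, 0.0,
                            cv::of2::FabMap::MEAN_FIELD | cv::of2::FabMap::CHOW_LIU);
    fabmap.addTraining(trainData);

    // 3. Compare each incoming descriptor against the map, growing it as we go.
    std::vector<cv::of2::IMatch> matches;
    for (size_t i = 0; i < testData.size(); i++)
        fabmap.compare(testData[i], matches, true); // addQuery = true
}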
@ -1,288 +0,0 @@
/*M  License Agreement for Open Source Computer Vision Library.
//   Copyright (C) 2009, Farhad Dadgostar
//   Intel Corporation and third party copyrights are property of their respective owners.
//   [standard OpenCV BSD license text]
//M*/

#include "precomp.hpp"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
#include "opencv2/contrib/compat.hpp"
|
||||
|
||||
#define ASD_INTENSITY_SET_PIXEL(pointer, qq) {(*pointer) = (unsigned char)qq;}
|
||||
|
||||
#define ASD_IS_IN_MOTION(pointer, v, threshold) ((abs((*(pointer)) - (v)) > (threshold)) ? true : false)
|
||||
|
||||
void CvAdaptiveSkinDetector::initData(IplImage *src, int widthDivider, int heightDivider)
|
||||
{
|
||||
CvSize imageSize = cvSize(src->width/widthDivider, src->height/heightDivider);
|
||||
|
||||
imgHueFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
|
||||
imgShrinked = cvCreateImage(imageSize, IPL_DEPTH_8U, src->nChannels);
|
||||
imgSaturationFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
|
||||
imgMotionFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
|
||||
imgTemp = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
|
||||
imgFilteredFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
|
||||
imgGrayFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
|
||||
imgLastGrayFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
|
||||
imgHSVFrame = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
|
||||
}
|
||||
|
||||
CvAdaptiveSkinDetector::CvAdaptiveSkinDetector(int samplingDivider, int morphingMethod)
|
||||
{
|
||||
nSkinHueLowerBound = GSD_HUE_LT;
|
||||
nSkinHueUpperBound = GSD_HUE_UT;
|
||||
|
||||
fHistogramMergeFactor = 0.05; // empirical result
|
||||
fHuePercentCovered = 0.95; // empirical result
|
||||
|
||||
nMorphingMethod = morphingMethod;
|
||||
nSamplingDivider = samplingDivider;
|
||||
|
||||
nFrameCount = 0;
|
||||
nStartCounter = 0;
|
||||
|
||||
imgHueFrame = NULL;
|
||||
imgMotionFrame = NULL;
|
||||
imgTemp = NULL;
|
||||
imgFilteredFrame = NULL;
|
||||
imgShrinked = NULL;
|
||||
imgGrayFrame = NULL;
|
||||
imgLastGrayFrame = NULL;
|
||||
imgSaturationFrame = NULL;
|
||||
imgHSVFrame = NULL;
|
||||
}
|
||||
|
||||
CvAdaptiveSkinDetector::~CvAdaptiveSkinDetector()
|
||||
{
|
||||
cvReleaseImage(&imgHueFrame);
|
||||
cvReleaseImage(&imgSaturationFrame);
|
||||
cvReleaseImage(&imgMotionFrame);
|
||||
cvReleaseImage(&imgTemp);
|
||||
cvReleaseImage(&imgFilteredFrame);
|
||||
cvReleaseImage(&imgShrinked);
|
||||
cvReleaseImage(&imgGrayFrame);
|
||||
cvReleaseImage(&imgLastGrayFrame);
|
||||
cvReleaseImage(&imgHSVFrame);
|
||||
}
|
||||
|
||||
void CvAdaptiveSkinDetector::process(IplImage *inputBGRImage, IplImage *outputHueMask)
|
||||
{
|
||||
IplImage *src = inputBGRImage;
|
||||
|
||||
int h, v, i, l;
|
||||
bool isInit = false;
|
||||
|
||||
nFrameCount++;
|
||||
|
||||
if (imgHueFrame == NULL)
|
||||
{
|
||||
isInit = true;
|
||||
initData(src, nSamplingDivider, nSamplingDivider);
|
||||
}
|
||||
|
||||
unsigned char *pShrinked, *pHueFrame, *pMotionFrame, *pLastGrayFrame, *pFilteredFrame, *pGrayFrame;
|
||||
pShrinked = (unsigned char *)imgShrinked->imageData;
|
||||
pHueFrame = (unsigned char *)imgHueFrame->imageData;
|
||||
pMotionFrame = (unsigned char *)imgMotionFrame->imageData;
|
||||
pLastGrayFrame = (unsigned char *)imgLastGrayFrame->imageData;
|
||||
pFilteredFrame = (unsigned char *)imgFilteredFrame->imageData;
|
||||
pGrayFrame = (unsigned char *)imgGrayFrame->imageData;
|
||||
|
||||
if ((src->width != imgHueFrame->width) || (src->height != imgHueFrame->height))
|
||||
{
|
||||
cvResize(src, imgShrinked);
|
||||
cvCvtColor(imgShrinked, imgHSVFrame, CV_BGR2HSV);
|
||||
}
|
||||
else
|
||||
{
|
||||
cvCvtColor(src, imgHSVFrame, CV_BGR2HSV);
|
||||
}
|
||||
|
||||
cvSplit(imgHSVFrame, imgHueFrame, imgSaturationFrame, imgGrayFrame, 0);
|
||||
|
||||
cvSetZero(imgMotionFrame);
|
||||
cvSetZero(imgFilteredFrame);
|
||||
|
||||
l = imgHueFrame->height * imgHueFrame->width;
|
||||
|
||||
for (i = 0; i < l; i++)
|
||||
{
|
||||
v = (*pGrayFrame);
|
||||
if ((v >= GSD_INTENSITY_LT) && (v <= GSD_INTENSITY_UT))
|
||||
{
|
||||
h = (*pHueFrame);
|
||||
if ((h >= GSD_HUE_LT) && (h <= GSD_HUE_UT))
|
||||
{
|
||||
if ((h >= nSkinHueLowerBound) && (h <= nSkinHueUpperBound))
|
||||
ASD_INTENSITY_SET_PIXEL(pFilteredFrame, h);
|
||||
|
||||
if (ASD_IS_IN_MOTION(pLastGrayFrame, v, 7))
|
||||
ASD_INTENSITY_SET_PIXEL(pMotionFrame, h);
|
||||
}
|
||||
}
|
||||
pShrinked += 3;
|
||||
pGrayFrame++;
|
||||
pLastGrayFrame++;
|
||||
pMotionFrame++;
|
||||
pHueFrame++;
|
||||
pFilteredFrame++;
|
||||
}
|
||||
|
||||
if (isInit)
|
||||
cvCalcHist(&imgHueFrame, skinHueHistogram.fHistogram);
|
||||
|
||||
cvCopy(imgGrayFrame, imgLastGrayFrame);
|
||||
|
||||
cvErode(imgMotionFrame, imgTemp); // eliminate disperse pixels, which occur because of the camera noise
|
||||
cvDilate(imgTemp, imgMotionFrame);
|
||||
|
||||
cvCalcHist(&imgMotionFrame, histogramHueMotion.fHistogram);
|
||||
|
||||
skinHueHistogram.mergeWith(&histogramHueMotion, fHistogramMergeFactor);
|
||||
|
||||
skinHueHistogram.findCurveThresholds(nSkinHueLowerBound, nSkinHueUpperBound, 1 - fHuePercentCovered);
|
||||
|
||||
switch (nMorphingMethod)
|
||||
{
|
||||
case MORPHING_METHOD_ERODE :
|
||||
cvErode(imgFilteredFrame, imgTemp);
|
||||
cvCopy(imgTemp, imgFilteredFrame);
|
||||
break;
|
||||
case MORPHING_METHOD_ERODE_ERODE :
|
||||
cvErode(imgFilteredFrame, imgTemp);
|
||||
cvErode(imgTemp, imgFilteredFrame);
|
||||
break;
|
||||
case MORPHING_METHOD_ERODE_DILATE :
|
||||
cvErode(imgFilteredFrame, imgTemp);
|
||||
cvDilate(imgTemp, imgFilteredFrame);
|
||||
break;
|
||||
}
|
||||
|
||||
if (outputHueMask != NULL)
|
||||
cvCopy(imgFilteredFrame, outputHueMask);
|
||||
}
|
||||
|
||||
|
||||
//------------------------- Histogram for Adaptive Skin Detector -------------------------//
|
||||
|
||||
CvAdaptiveSkinDetector::Histogram::Histogram()
|
||||
{
|
||||
int histogramSize[] = { HistogramSize };
|
||||
float range[] = { GSD_HUE_LT, GSD_HUE_UT };
|
||||
float *ranges[] = { range };
|
||||
fHistogram = cvCreateHist(1, histogramSize, CV_HIST_ARRAY, ranges, 1);
|
||||
cvClearHist(fHistogram);
|
||||
}
|
||||
|
||||
CvAdaptiveSkinDetector::Histogram::~Histogram()
|
||||
{
|
||||
cvReleaseHist(&fHistogram);
|
||||
}
|
||||
|
||||
int CvAdaptiveSkinDetector::Histogram::findCoverageIndex(double surfaceToCover, int defaultValue)
|
||||
{
|
||||
double s = 0;
|
||||
for (int i = 0; i < HistogramSize; i++)
|
||||
{
|
||||
s += cvGetReal1D( fHistogram->bins, i );
|
||||
if (s >= surfaceToCover)
|
||||
{
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
void CvAdaptiveSkinDetector::Histogram::findCurveThresholds(int &x1, int &x2, double percent)
|
||||
{
|
||||
double sum = 0;
|
||||
|
||||
for (int i = 0; i < HistogramSize; i++)
|
||||
{
|
||||
sum += cvGetReal1D( fHistogram->bins, i );
|
||||
}
|
||||
|
||||
x1 = findCoverageIndex(sum * percent, -1);
|
||||
x2 = findCoverageIndex(sum * (1-percent), -1);
|
||||
|
||||
if (x1 == -1)
|
||||
x1 = GSD_HUE_LT;
|
||||
else
|
||||
x1 += GSD_HUE_LT;
|
||||
|
||||
if (x2 == -1)
|
||||
x2 = GSD_HUE_UT;
|
||||
else
|
||||
x2 += GSD_HUE_LT;
|
||||
}
|
||||
|
||||
void CvAdaptiveSkinDetector::Histogram::mergeWith(CvAdaptiveSkinDetector::Histogram *source, double weight)
|
||||
{
|
||||
float myweight = (float)(1-weight);
|
||||
float maxVal1 = 0, maxVal2 = 0, *f1, *f2, ff1, ff2;
|
||||
|
||||
cvGetMinMaxHistValue(source->fHistogram, NULL, &maxVal2);
|
||||
|
||||
if (maxVal2 > 0 )
|
||||
{
|
||||
cvGetMinMaxHistValue(fHistogram, NULL, &maxVal1);
|
||||
if (maxVal1 <= 0)
|
||||
{
|
||||
for (int i = 0; i < HistogramSize; i++)
|
||||
{
|
||||
f1 = (float*)cvPtr1D(fHistogram->bins, i);
|
||||
f2 = (float*)cvPtr1D(source->fHistogram->bins, i);
|
||||
(*f1) = (*f2);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (int i = 0; i < HistogramSize; i++)
|
||||
{
|
||||
f1 = (float*)cvPtr1D(fHistogram->bins, i);
|
||||
f2 = (float*)cvPtr1D(source->fHistogram->bins, i);
|
||||
|
||||
ff1 = ((*f1)/maxVal1)*myweight;
|
||||
if (ff1 < 0)
|
||||
ff1 = -ff1;
|
||||
|
||||
ff2 = (float)(((*f2)/maxVal2)*weight);
|
||||
if (ff2 < 0)
|
||||
ff2 = -ff2;
|
||||
|
||||
(*f1) = (ff1 + ff2);
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
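For completeness, a hedged sketch of driving this detector through the legacy C API; frame acquisition is elided, and the sizes match only because samplingDivider is 1:

#include "opencv2/contrib/compat.hpp" // pre-removal path (assumption)

void detectSkin(IplImage* bgrFrame)
{
    // static so the adaptive hue histogram persists across frames
    static CvAdaptiveSkinDetector detector(1,
        CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);

    IplImage* hueMask = cvCreateImage(cvGetSize(bgrFrame), IPL_DEPTH_8U, 1);
    detector.process(bgrFrame, hueMask); // hueMask keeps skin-hue pixels, 0 elsewhere
    // ... consume hueMask ...
    cvReleaseImage(&hueMask);
}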
@ -1,138 +0,0 @@
/*M  [openFABMAP origin, citation, and license header; identical to the block above]  M*/

#include "precomp.hpp"
|
||||
#include "opencv2/contrib/openfabmap.hpp"
|
||||
|
||||
namespace cv {
|
||||
|
||||
namespace of2 {
|
||||
|
||||
BOWMSCTrainer::BOWMSCTrainer(double _clusterSize) :
|
||||
clusterSize(_clusterSize) {
|
||||
}
|
||||
|
||||
BOWMSCTrainer::~BOWMSCTrainer() {
|
||||
}
|
||||
|
||||
Mat BOWMSCTrainer::cluster() const {
|
||||
CV_Assert(!descriptors.empty());
|
||||
int descCount = 0;
|
||||
for(size_t i = 0; i < descriptors.size(); i++)
|
||||
descCount += descriptors[i].rows;
|
||||
|
||||
Mat mergedDescriptors(descCount, descriptors[0].cols,
|
||||
descriptors[0].type());
|
||||
for(size_t i = 0, start = 0; i < descriptors.size(); i++)
|
||||
{
|
||||
Mat submut = mergedDescriptors.rowRange((int)start,
|
||||
(int)(start + descriptors[i].rows));
|
||||
descriptors[i].copyTo(submut);
|
||||
start += descriptors[i].rows;
|
||||
}
|
||||
return cluster(mergedDescriptors);
|
||||
}
|
||||
|
||||
Mat BOWMSCTrainer::cluster(const Mat& _descriptors) const {
|
||||
|
||||
CV_Assert(!_descriptors.empty());
|
||||
|
||||
// TODO: sort the descriptors before clustering.
|
||||
|
||||
|
||||
Mat icovar = Mat::eye(_descriptors.cols,_descriptors.cols,_descriptors.type());
|
||||
|
||||
std::vector<Mat> initialCentres;
|
||||
initialCentres.push_back(_descriptors.row(0));
|
||||
for (int i = 1; i < _descriptors.rows; i++) {
|
||||
double minDist = DBL_MAX;
|
||||
for (size_t j = 0; j < initialCentres.size(); j++) {
|
||||
minDist = std::min(minDist,
|
||||
cv::Mahalanobis(_descriptors.row(i),initialCentres[j],
|
||||
icovar));
|
||||
}
|
||||
if (minDist > clusterSize)
|
||||
initialCentres.push_back(_descriptors.row(i));
|
||||
}
|
||||
|
||||
std::vector<std::list<cv::Mat> > clusters;
|
||||
clusters.resize(initialCentres.size());
|
||||
for (int i = 0; i < _descriptors.rows; i++) {
|
||||
int index = 0; double dist = 0, minDist = DBL_MAX;
|
||||
for (size_t j = 0; j < initialCentres.size(); j++) {
|
||||
dist = cv::Mahalanobis(_descriptors.row(i),initialCentres[j],icovar);
|
||||
if (dist < minDist) {
|
||||
minDist = dist;
|
||||
index = (int)j;
|
||||
}
|
||||
}
|
||||
clusters[index].push_back(_descriptors.row(i));
|
||||
}
|
||||
|
||||
// TODO: throw away small clusters.
|
||||
|
||||
Mat vocabulary;
|
||||
Mat centre = Mat::zeros(1,_descriptors.cols,_descriptors.type());
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
centre.setTo(0);
|
||||
for (std::list<cv::Mat>::iterator Ci = clusters[i].begin(); Ci != clusters[i].end(); Ci++) {
|
||||
centre += *Ci;
|
||||
}
|
||||
centre /= (double)clusters[i].size();
|
||||
vocabulary.push_back(centre);
|
||||
}
|
||||
|
||||
return vocabulary;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
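Before this removal, a FabMAP vocabulary was typically built by feeding training descriptors into the trainer above. A minimal sketch against the 2.4-era cv::of2 API follows; buildVocabulary, trainDescriptors and the 0.6 cluster radius are hypothetical stand-ins, not anything from this PR:

    // Sketch only: accumulate per-image descriptors, then cluster them into a
    // vocabulary with the modified-sequential-clustering (MSC) trainer.
    #include "opencv2/contrib/openfabmap.hpp"
    #include <vector>

    cv::Mat buildVocabulary(const std::vector<cv::Mat>& trainDescriptors)
    {
        // clusterSize is the Mahalanobis radius that spawns a new centre
        cv::of2::BOWMSCTrainer trainer(0.6);
        for (size_t i = 0; i < trainDescriptors.size(); i++)
            trainer.add(trainDescriptors[i]); // add() is inherited from cv::BOWTrainer
        return trainer.cluster();             // one vocabulary word per row
    }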
@ -1,289 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
// This file originates from the openFABMAP project:
// [http://code.google.com/p/openfabmap/]
//
// For published work which uses all or part of OpenFABMAP, please cite:
// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843]
//
// Original Algorithm by Mark Cummins and Paul Newman:
// [http://ijr.sagepub.com/content/27/6/647.short]
// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942]
// [http://ijr.sagepub.com/content/30/9/1100.abstract]
//
//                           License Agreement
//
// Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and
//                    Will Maddern [w.maddern@qut.edu.au], all rights reserved.
//
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencv2/contrib/openfabmap.hpp"

namespace cv {

namespace of2 {

ChowLiuTree::ChowLiuTree() {
}

ChowLiuTree::~ChowLiuTree() {
}

void ChowLiuTree::add(const Mat& imgDescriptor) {
    CV_Assert(!imgDescriptor.empty());
    if (!imgDescriptors.empty()) {
        CV_Assert(imgDescriptors[0].cols == imgDescriptor.cols);
        CV_Assert(imgDescriptors[0].type() == imgDescriptor.type());
    }

    imgDescriptors.push_back(imgDescriptor);
}

void ChowLiuTree::add(const std::vector<Mat>& _imgDescriptors) {
    for (size_t i = 0; i < _imgDescriptors.size(); i++) {
        add(_imgDescriptors[i]);
    }
}

const std::vector<cv::Mat>& ChowLiuTree::getImgDescriptors() const {
    return imgDescriptors;
}

Mat ChowLiuTree::make(double infoThreshold) {
    CV_Assert(!imgDescriptors.empty());

    unsigned int descCount = 0;
    for (size_t i = 0; i < imgDescriptors.size(); i++)
        descCount += imgDescriptors[i].rows;

    mergedImgDescriptors = cv::Mat(descCount, imgDescriptors[0].cols,
        imgDescriptors[0].type());
    for (size_t i = 0, start = 0; i < imgDescriptors.size(); i++)
    {
        Mat submut = mergedImgDescriptors.rowRange((int)start,
            (int)(start + imgDescriptors[i].rows));
        imgDescriptors[i].copyTo(submut);
        start += imgDescriptors[i].rows;
    }

    std::list<info> edges;
    createBaseEdges(edges, infoThreshold);

    // TODO: if it cv_asserts here they really won't know why.
    CV_Assert(reduceEdgesToMinSpan(edges));

    return buildTree(edges.front().word1, edges);
}

double ChowLiuTree::P(int a, bool za) {
    if(za) {
        return (0.98 * cv::countNonZero(mergedImgDescriptors.col(a)) /
            mergedImgDescriptors.rows) + 0.01;
    } else {
        return 1 - ((0.98 * cv::countNonZero(mergedImgDescriptors.col(a)) /
            mergedImgDescriptors.rows) + 0.01);
    }
}

double ChowLiuTree::JP(int a, bool za, int b, bool zb) {
    double count = 0;
    for(int i = 0; i < mergedImgDescriptors.rows; i++) {
        if((mergedImgDescriptors.at<float>(i,a) > 0) == za &&
            (mergedImgDescriptors.at<float>(i,b) > 0) == zb) {
            count++;
        }
    }
    return count / mergedImgDescriptors.rows;
}

double ChowLiuTree::CP(int a, bool za, int b, bool zb) {
    int count = 0, total = 0;
    for(int i = 0; i < mergedImgDescriptors.rows; i++) {
        if((mergedImgDescriptors.at<float>(i,b) > 0) == zb) {
            total++;
            if((mergedImgDescriptors.at<float>(i,a) > 0) == za) {
                count++;
            }
        }
    }
    if(total) {
        return (double)(0.98 * count)/total + 0.01;
    } else {
        return (za) ? 0.01 : 0.99;
    }
}

cv::Mat ChowLiuTree::buildTree(int root_word, std::list<info> &edges) {
    int q = root_word;
    cv::Mat cltree(4, (int)edges.size()+1, CV_64F);

    cltree.at<double>(0, q) = q;
    cltree.at<double>(1, q) = P(q, true);
    cltree.at<double>(2, q) = P(q, true);
    cltree.at<double>(3, q) = P(q, true);
    // Setting P(zq|zpq) to P(zq) gives the root node of the Chow-Liu tree
    // independence from a parent node.

    // find all children and do the same
    std::vector<int> nextqs = extractChildren(edges, q);

    int pq = q;
    std::vector<int>::iterator nextq;
    for(nextq = nextqs.begin(); nextq != nextqs.end(); nextq++) {
        recAddToTree(cltree, *nextq, pq, edges);
    }

    return cltree;
}

void ChowLiuTree::recAddToTree(cv::Mat &cltree, int q, int pq,
    std::list<info>& remaining_edges) {

    cltree.at<double>(0, q) = pq;
    cltree.at<double>(1, q) = P(q, true);
    cltree.at<double>(2, q) = CP(q, true, pq, true);
    cltree.at<double>(3, q) = CP(q, true, pq, false);

    // find all children and do the same
    std::vector<int> nextqs = extractChildren(remaining_edges, q);

    pq = q;
    std::vector<int>::iterator nextq;
    for(nextq = nextqs.begin(); nextq != nextqs.end(); nextq++) {
        recAddToTree(cltree, *nextq, pq, remaining_edges);
    }
}

std::vector<int> ChowLiuTree::extractChildren(std::list<info> &remaining_edges, int q) {
    std::vector<int> children;
    std::list<info>::iterator edge = remaining_edges.begin();

    while(edge != remaining_edges.end()) {
        if(edge->word1 == q) {
            children.push_back(edge->word2);
            edge = remaining_edges.erase(edge);
            continue;
        }
        if(edge->word2 == q) {
            children.push_back(edge->word1);
            edge = remaining_edges.erase(edge);
            continue;
        }
        edge++;
    }

    return children;
}

bool ChowLiuTree::sortInfoScores(const info& first, const info& second) {
    return first.score > second.score;
}

double ChowLiuTree::calcMutInfo(int word1, int word2) {
    double accumulation = 0;

    double P00 = JP(word1, false, word2, false);
    if(P00) accumulation += P00 * std::log(P00 / (P(word1, false)*P(word2, false)));

    double P01 = JP(word1, false, word2, true);
    if(P01) accumulation += P01 * std::log(P01 / (P(word1, false)*P(word2, true)));

    double P10 = JP(word1, true, word2, false);
    if(P10) accumulation += P10 * std::log(P10 / (P(word1, true)*P(word2, false)));

    double P11 = JP(word1, true, word2, true);
    if(P11) accumulation += P11 * std::log(P11 / (P(word1, true)*P(word2, true)));

    return accumulation;
}

void ChowLiuTree::createBaseEdges(std::list<info>& edges, double infoThreshold) {
    int nWords = imgDescriptors[0].cols;
    info mutInfo;

    for(int word1 = 0; word1 < nWords; word1++) {
        for(int word2 = word1 + 1; word2 < nWords; word2++) {
            mutInfo.word1 = (short)word1;
            mutInfo.word2 = (short)word2;
            mutInfo.score = (float)calcMutInfo(word1, word2);
            if(mutInfo.score >= infoThreshold)
                edges.push_back(mutInfo);
        }
    }
    edges.sort(sortInfoScores);
}

bool ChowLiuTree::reduceEdgesToMinSpan(std::list<info>& edges) {
    std::map<int, int> groups;
    std::map<int, int>::iterator groupIt;
    for(int i = 0; i < imgDescriptors[0].cols; i++) groups[i] = i;
    int group1, group2;

    std::list<info>::iterator edge = edges.begin();
    while(edge != edges.end()) {
        if(groups[edge->word1] != groups[edge->word2]) {
            group1 = groups[edge->word1];
            group2 = groups[edge->word2];
            for(groupIt = groups.begin(); groupIt != groups.end(); groupIt++)
                if(groupIt->second == group2) groupIt->second = group1;
            edge++;
        } else {
            edge = edges.erase(edge);
        }
    }

    if(edges.size() != (unsigned int)imgDescriptors[0].cols - 1) {
        return false;
    } else {
        return true;
    }
}

}

}
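For reference, calcMutInfo above accumulates the standard pairwise mutual information of two binarised word-occurrence columns,

    I(z_a, z_b) = \sum_{x,y \in \{0,1\}} P(z_a = x, z_b = y) \, \log \frac{P(z_a = x, z_b = y)}{P(z_a = x)\, P(z_b = y)},

with JP supplying the joint terms and P the smoothed marginals. reduceEdgesToMinSpan then keeps a maximum-weight spanning tree by scanning the score-sorted edge list Kruskal-style (the groups map acts as a flat union-find), which is exactly the tree structure the Chow-Liu approximation requires.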
@ -1,530 +0,0 @@
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */
#include "precomp.hpp"
#include <iostream>

#ifdef _MSC_VER
#pragma warning( disable: 4305 )
#endif

namespace cv
{

static Mat linspace(float x0, float x1, int n)
{
    Mat pts(n, 1, CV_32FC1);
    float step = (x1-x0)/(n-1);
    for(int i = 0; i < n; i++)
        pts.at<float>(i,0) = x0+i*step;
    return pts;
}

//------------------------------------------------------------------------------
// cv::sortMatrixRowsByIndices
//------------------------------------------------------------------------------
static void sortMatrixRowsByIndices(InputArray _src, InputArray _indices, OutputArray _dst)
{
    if(_indices.getMat().type() != CV_32SC1)
        CV_Error(Error::StsUnsupportedFormat, "cv::sortRowsByIndices only works on integer indices!");
    Mat src = _src.getMat();
    std::vector<int> indices = _indices.getMat();
    _dst.create(src.rows, src.cols, src.type());
    Mat dst = _dst.getMat();
    for(size_t idx = 0; idx < indices.size(); idx++) {
        Mat originalRow = src.row(indices[idx]);
        Mat sortedRow = dst.row((int)idx);
        originalRow.copyTo(sortedRow);
    }
}

static Mat sortMatrixRowsByIndices(InputArray src, InputArray indices)
{
    Mat dst;
    sortMatrixRowsByIndices(src, indices, dst);
    return dst;
}

static Mat argsort(InputArray _src, bool ascending=true)
{
    Mat src = _src.getMat();
    if (src.rows != 1 && src.cols != 1)
        CV_Error(Error::StsBadArg, "cv::argsort only sorts 1D matrices.");
    int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING);
    Mat sorted_indices;
    sortIdx(src.reshape(1,1), sorted_indices, flags);
    return sorted_indices;
}

template <typename _Tp> static
Mat interp1_(const Mat& X_, const Mat& Y_, const Mat& XI)
{
    int n = XI.rows;
    // sort input table
    std::vector<int> sort_indices = argsort(X_);

    Mat X = sortMatrixRowsByIndices(X_, sort_indices);
    Mat Y = sortMatrixRowsByIndices(Y_, sort_indices);
    // interpolated values
    Mat yi = Mat::zeros(XI.size(), XI.type());
    for(int i = 0; i < n; i++) {
        int c = 0;
        int low = 0;
        int high = X.rows - 1;
        // set bounds
        if(XI.at<_Tp>(i,0) < X.at<_Tp>(low, 0))
            high = 1;
        if(XI.at<_Tp>(i,0) > X.at<_Tp>(high, 0))
            low = high - 1;
        // binary search
        while((high-low)>1) {
            c = low + ((high - low) >> 1);
            if(XI.at<_Tp>(i,0) > X.at<_Tp>(c,0)) {
                low = c;
            } else {
                high = c;
            }
        }
        // linear interpolation
        yi.at<_Tp>(i,0) += Y.at<_Tp>(low,0)
            + (XI.at<_Tp>(i,0) - X.at<_Tp>(low,0))
            * (Y.at<_Tp>(high,0) - Y.at<_Tp>(low,0))
            / (X.at<_Tp>(high,0) - X.at<_Tp>(low,0));
    }
    return yi;
}

static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
{
    // get matrices
    Mat x = _x.getMat();
    Mat Y = _Y.getMat();
    Mat xi = _xi.getMat();
    // check types & alignment
    CV_Assert((x.type() == Y.type()) && (Y.type() == xi.type()));
    CV_Assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols));
    // call templated interp1
    switch(x.type()) {
        case CV_8SC1: return interp1_<char>(x,Y,xi); break;
        case CV_8UC1: return interp1_<unsigned char>(x,Y,xi); break;
        case CV_16SC1: return interp1_<short>(x,Y,xi); break;
        case CV_16UC1: return interp1_<unsigned short>(x,Y,xi); break;
        case CV_32SC1: return interp1_<int>(x,Y,xi); break;
        case CV_32FC1: return interp1_<float>(x,Y,xi); break;
        case CV_64FC1: return interp1_<double>(x,Y,xi); break;
        default: CV_Error(Error::StsUnsupportedFormat, ""); break;
    }
    return Mat();
}
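// Worked example of interp1 above (illustrative only, not part of the file's
// original text): with a base table X = [0, 1], Y = [0, 10] and a query
// xi = 0.25, the binary search settles on low = 0, high = 1, and the linear
// step evaluates to
//     yi = 0 + (0.25 - 0) * (10 - 0) / (1 - 0) = 2.5.
// The linear_colormap() overloads below lean on exactly this to stretch a
// 64- or 256-entry base palette onto an n-entry lookup table.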
namespace colormap
{

    class ColorMap {

    protected:
        Mat _lut;

    public:
        virtual ~ColorMap() {}

        // Applies the colormap on a given image.
        //
        // This function expects BGR-aligned data of type CV_8UC1 or
        // CV_8UC3. If the wrong image type is given, the original image
        // will be returned.
        //
        // Throws an error for wrong-aligned lookup table, which must be
        // of size 256 in the latest OpenCV release (2.3.1).
        void operator()(InputArray src, OutputArray dst) const;

        // Setup base map to interpolate from.
        virtual void init(int n) = 0;

        // Interpolates from a base colormap.
        static Mat linear_colormap(InputArray X,
            InputArray r, InputArray g, InputArray b,
            int n) {
            return linear_colormap(X,r,g,b,linspace(0,1,n));
        }

        // Interpolates from a base colormap.
        static Mat linear_colormap(InputArray X,
            InputArray r, InputArray g, InputArray b,
            float begin, float end, float n) {
            return linear_colormap(X,r,g,b,linspace(begin,end, cvRound(n)));
        }

        // Interpolates from a base colormap.
        static Mat linear_colormap(InputArray X,
            InputArray r, InputArray g, InputArray b,
            InputArray xi);
    };

    // Equals the GNU Octave colormap "autumn".
    class Autumn : public ColorMap {
    public:
        Autumn() : ColorMap() {
            init(256);
        }

        Autumn(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
float g[] = { 0, 0.01587301587301587, 0.03174603174603174, 0.04761904761904762, 0.06349206349206349, 0.07936507936507936, 0.09523809523809523, 0.1111111111111111, 0.126984126984127, 0.1428571428571428, 0.1587301587301587, 0.1746031746031746, 0.1904761904761905, 0.2063492063492063, 0.2222222222222222, 0.2380952380952381, 0.253968253968254, 0.2698412698412698, 0.2857142857142857, 0.3015873015873016, 0.3174603174603174, 0.3333333333333333, 0.3492063492063492, 0.3650793650793651, 0.3809523809523809, 0.3968253968253968, 0.4126984126984127, 0.4285714285714285, 0.4444444444444444, 0.4603174603174603, 0.4761904761904762, 0.492063492063492, 0.5079365079365079, 0.5238095238095238, 0.5396825396825397, 0.5555555555555556, 0.5714285714285714, 0.5873015873015873, 0.6031746031746031, 0.6190476190476191, 0.6349206349206349, 0.6507936507936508, 0.6666666666666666, 0.6825396825396826, 0.6984126984126984, 0.7142857142857143, 0.7301587301587301, 0.746031746031746, 0.7619047619047619, 0.7777777777777778, 0.7936507936507936, 0.8095238095238095, 0.8253968253968254, 0.8412698412698413, 0.8571428571428571, 0.873015873015873, 0.8888888888888888, 0.9047619047619048, 0.9206349206349206, 0.9365079365079365, 0.9523809523809523, 0.9682539682539683, 0.9841269841269841, 1};
float b[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "bone".
    class Bone : public ColorMap {
    public:
        Bone() : ColorMap() {
            init(256);
        }

        Bone(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 0, 0.01388888888888889, 0.02777777777777778, 0.04166666666666666, 0.05555555555555555, 0.06944444444444445, 0.08333333333333333, 0.09722222222222221, 0.1111111111111111, 0.125, 0.1388888888888889, 0.1527777777777778, 0.1666666666666667, 0.1805555555555556, 0.1944444444444444, 0.2083333333333333, 0.2222222222222222, 0.2361111111111111, 0.25, 0.2638888888888889, 0.2777777777777778, 0.2916666666666666, 0.3055555555555555, 0.3194444444444444, 0.3333333333333333, 0.3472222222222222, 0.3611111111111111, 0.375, 0.3888888888888888, 0.4027777777777777, 0.4166666666666666, 0.4305555555555555, 0.4444444444444444, 0.4583333333333333, 0.4722222222222222, 0.4861111111111112, 0.5, 0.5138888888888888, 0.5277777777777778, 0.5416666666666667, 0.5555555555555556, 0.5694444444444444, 0.5833333333333333, 0.5972222222222222, 0.611111111111111, 0.6249999999999999, 0.6388888888888888, 0.6527777777777778, 0.6726190476190474, 0.6944444444444442, 0.7162698412698412, 0.7380952380952381, 0.7599206349206349, 0.7817460317460316, 0.8035714285714286, 0.8253968253968254, 0.8472222222222221, 0.8690476190476188, 0.8908730158730158, 0.9126984126984128, 0.9345238095238095, 0.9563492063492063, 0.978174603174603, 1};
float g[] = { 0, 0.01388888888888889, 0.02777777777777778, 0.04166666666666666, 0.05555555555555555, 0.06944444444444445, 0.08333333333333333, 0.09722222222222221, 0.1111111111111111, 0.125, 0.1388888888888889, 0.1527777777777778, 0.1666666666666667, 0.1805555555555556, 0.1944444444444444, 0.2083333333333333, 0.2222222222222222, 0.2361111111111111, 0.25, 0.2638888888888889, 0.2777777777777778, 0.2916666666666666, 0.3055555555555555, 0.3194444444444444, 0.3353174603174602, 0.3544973544973544, 0.3736772486772486, 0.3928571428571428, 0.412037037037037, 0.4312169312169312, 0.4503968253968254, 0.4695767195767195, 0.4887566137566137, 0.5079365079365078, 0.5271164021164021, 0.5462962962962963, 0.5654761904761904, 0.5846560846560845, 0.6038359788359787, 0.623015873015873, 0.6421957671957671, 0.6613756613756612, 0.6805555555555555, 0.6997354497354497, 0.7189153439153438, 0.7380952380952379, 0.7572751322751322, 0.7764550264550264, 0.7916666666666666, 0.8055555555555555, 0.8194444444444444, 0.8333333333333334, 0.8472222222222222, 0.861111111111111, 0.875, 0.8888888888888888, 0.9027777777777777, 0.9166666666666665, 0.9305555555555555, 0.9444444444444444, 0.9583333333333333, 0.9722222222222221, 0.986111111111111, 1};
float b[] = { 0, 0.01917989417989418, 0.03835978835978836, 0.05753968253968253, 0.07671957671957672, 0.09589947089947089, 0.1150793650793651, 0.1342592592592592, 0.1534391534391534, 0.1726190476190476, 0.1917989417989418, 0.210978835978836, 0.2301587301587301, 0.2493386243386243, 0.2685185185185185, 0.2876984126984127, 0.3068783068783069, 0.326058201058201, 0.3452380952380952, 0.3644179894179894, 0.3835978835978835, 0.4027777777777777, 0.4219576719576719, 0.4411375661375661, 0.4583333333333333, 0.4722222222222222, 0.4861111111111111, 0.5, 0.5138888888888888, 0.5277777777777777, 0.5416666666666666, 0.5555555555555556, 0.5694444444444444, 0.5833333333333333, 0.5972222222222222, 0.6111111111111112, 0.625, 0.6388888888888888, 0.6527777777777778, 0.6666666666666667, 0.6805555555555556, 0.6944444444444444, 0.7083333333333333, 0.7222222222222222, 0.736111111111111, 0.7499999999999999, 0.7638888888888888, 0.7777777777777778, 0.7916666666666666, 0.8055555555555555, 0.8194444444444444, 0.8333333333333334, 0.8472222222222222, 0.861111111111111, 0.875, 0.8888888888888888, 0.9027777777777777, 0.9166666666666665, 0.9305555555555555, 0.9444444444444444, 0.9583333333333333, 0.9722222222222221, 0.986111111111111, 1};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "jet".
    class Jet : public ColorMap {

    public:
        Jet() {
            init(256);
        }
        Jet(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
            // breakpoints
            Mat X = linspace(0,1,256);
            // define the basemap
float r[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.00588235294117645,0.02156862745098032,0.03725490196078418,0.05294117647058827,0.06862745098039214,0.084313725490196,0.1000000000000001,0.115686274509804,0.1313725490196078,0.1470588235294117,0.1627450980392156,0.1784313725490196,0.1941176470588235,0.2098039215686274,0.2254901960784315,0.2411764705882353,0.2568627450980392,0.2725490196078431,0.2882352941176469,0.303921568627451,0.3196078431372549,0.3352941176470587,0.3509803921568628,0.3666666666666667,0.3823529411764706,0.3980392156862744,0.4137254901960783,0.4294117647058824,0.4450980392156862,0.4607843137254901,0.4764705882352942,0.4921568627450981,0.5078431372549019,0.5235294117647058,0.5392156862745097,0.5549019607843135,0.5705882352941174,0.5862745098039217,0.6019607843137256,0.6176470588235294,0.6333333333333333,0.6490196078431372,0.664705882352941,0.6803921568627449,0.6960784313725492,0.7117647058823531,0.7274509803921569,0.7431372549019608,0.7588235294117647,0.7745098039215685,0.7901960784313724,0.8058823529411763,0.8215686274509801,0.8372549019607844,0.8529411764705883,0.8686274509803922,0.884313725490196,0.8999999999999999,0.9156862745098038,0.9313725490196076,0.947058823529412,0.9627450980392158,0.9784313725490197,0.9941176470588236,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0.9862745098039216,0.9705882352941178,0.9549019607843139,0.93921568627451,0.9235294117647062,0.9078431372549018,0.892156862745098,0.8764705882352941,0.8607843137254902,0.8450980392156864,0.8294117647058825,0.8137254901960786,0.7980392156862743,0.7823529411764705,0.7666666666666666,0.7509803921568627,0.7352941176470589,0.719607843137255,0.7039215686274511,0.6882352941176473,0.6725490196078434,0.6568627450980391,0.6411764705882352,0.6254901960784314,0.6098039215686275,0.5941176470588236,0.5784313725490198,0.5627450980392159,0.5470588235294116,0.5313725490196077,0.5156862745098039,0.5};
float g[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.001960784313725483,0.01764705882352935,0.03333333333333333,0.0490196078431373,0.06470588235294117,0.08039215686274503,0.09607843137254901,0.111764705882353,0.1274509803921569,0.1431372549019607,0.1588235294117647,0.1745098039215687,0.1901960784313725,0.2058823529411764,0.2215686274509804,0.2372549019607844,0.2529411764705882,0.2686274509803921,0.2843137254901961,0.3,0.3156862745098039,0.3313725490196078,0.3470588235294118,0.3627450980392157,0.3784313725490196,0.3941176470588235,0.4098039215686274,0.4254901960784314,0.4411764705882353,0.4568627450980391,0.4725490196078431,0.4882352941176471,0.503921568627451,0.5196078431372548,0.5352941176470587,0.5509803921568628,0.5666666666666667,0.5823529411764705,0.5980392156862746,0.6137254901960785,0.6294117647058823,0.6450980392156862,0.6607843137254901,0.6764705882352942,0.692156862745098,0.7078431372549019,0.723529411764706,0.7392156862745098,0.7549019607843137,0.7705882352941176,0.7862745098039214,0.8019607843137255,0.8176470588235294,0.8333333333333333,0.8490196078431373,0.8647058823529412,0.8803921568627451,0.8960784313725489,0.9117647058823528,0.9274509803921569,0.9431372549019608,0.9588235294117646,0.9745098039215687,0.9901960784313726,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0.9901960784313726,0.9745098039215687,0.9588235294117649,0.943137254901961,0.9274509803921571,0.9117647058823528,0.8960784313725489,0.8803921568627451,0.8647058823529412,0.8490196078431373,0.8333333333333335,0.8176470588235296,0.8019607843137253,0.7862745098039214,0.7705882352941176,0.7549019607843137,0.7392156862745098,0.723529411764706,0.7078431372549021,0.6921568627450982,0.6764705882352944,0.6607843137254901,0.6450980392156862,0.6294117647058823,0.6137254901960785,0.5980392156862746,0.5823529411764707,0.5666666666666669,0.5509803921568626,0.5352941176470587,0.5196078431372548,0.503921568627451,0.4882352941176471,0.4725490196078432,0.4568627450980394,0.4411764705882355,0.4254901960784316,0.4098039215686273,0.3941176470588235,0.3784313725490196,0.3627450980392157,0.3470588235294119,0.331372549019608,0.3156862745098041,0.2999999999999998,0.284313725490196,0.2686274509803921,0.2529411764705882,0.2372549019607844,0.2215686274509805,0.2058823529411766,0.1901960784313728,0.1745098039215689,0.1588235294117646,0.1431372549019607,0.1274509803921569,0.111764705882353,0.09607843137254912,0.08039215686274526,0.06470588235294139,0.04901960784313708,0.03333333333333321,0.01764705882352935,0.001960784313725483,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
float b[] = {0.5,0.5156862745098039,0.5313725490196078,0.5470588235294118,0.5627450980392157,0.5784313725490196,0.5941176470588235,0.6098039215686275,0.6254901960784314,0.6411764705882352,0.6568627450980392,0.6725490196078432,0.6882352941176471,0.7039215686274509,0.7196078431372549,0.7352941176470589,0.7509803921568627,0.7666666666666666,0.7823529411764706,0.7980392156862746,0.8137254901960784,0.8294117647058823,0.8450980392156863,0.8607843137254902,0.8764705882352941,0.892156862745098,0.907843137254902,0.9235294117647059,0.9392156862745098,0.9549019607843137,0.9705882352941176,0.9862745098039216,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0.9941176470588236,0.9784313725490197,0.9627450980392158,0.9470588235294117,0.9313725490196079,0.915686274509804,0.8999999999999999,0.884313725490196,0.8686274509803922,0.8529411764705883,0.8372549019607844,0.8215686274509804,0.8058823529411765,0.7901960784313726,0.7745098039215685,0.7588235294117647,0.7431372549019608,0.7274509803921569,0.7117647058823531,0.696078431372549,0.6803921568627451,0.6647058823529413,0.6490196078431372,0.6333333333333333,0.6176470588235294,0.6019607843137256,0.5862745098039217,0.5705882352941176,0.5549019607843138,0.5392156862745099,0.5235294117647058,0.5078431372549019,0.4921568627450981,0.4764705882352942,0.4607843137254903,0.4450980392156865,0.4294117647058826,0.4137254901960783,0.3980392156862744,0.3823529411764706,0.3666666666666667,0.3509803921568628,0.335294117647059,0.3196078431372551,0.3039215686274508,0.2882352941176469,0.2725490196078431,0.2568627450980392,0.2411764705882353,0.2254901960784315,0.2098039215686276,0.1941176470588237,0.1784313725490199,0.1627450980392156,0.1470588235294117,0.1313725490196078,0.115686274509804,0.1000000000000001,0.08431372549019622,0.06862745098039236,0.05294117647058805,0.03725490196078418,0.02156862745098032,0.00588235294117645,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
            // now build lookup table
            this->_lut = ColorMap::linear_colormap(X,
                Mat(256,1, CV_32FC1, r).clone(), // red
                Mat(256,1, CV_32FC1, g).clone(), // green
                Mat(256,1, CV_32FC1, b).clone(), // blue
                n);
        }
    };

    // Equals the GNU Octave colormap "winter".
    class Winter : public ColorMap {
    public:
        Winter() : ColorMap() {
            init(256);
        }

        Winter(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
            float r[] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
            float g[] = {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0};
            float b[] = {1.0, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65, 0.6, 0.55, 0.5};
            Mat X = linspace(0,1,11);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(11,1, CV_32FC1, r).clone(), // red
                Mat(11,1, CV_32FC1, g).clone(), // green
                Mat(11,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "rainbow".
    class Rainbow : public ColorMap {
    public:
        Rainbow() : ColorMap() {
            init(256);
        }

        Rainbow(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9365079365079367, 0.8571428571428572, 0.7777777777777777, 0.6984126984126986, 0.6190476190476191, 0.53968253968254, 0.4603174603174605, 0.3809523809523814, 0.3015873015873018, 0.2222222222222223, 0.1428571428571432, 0.06349206349206415, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603208, 0.08465608465608465, 0.1375661375661377, 0.1904761904761907, 0.2433862433862437, 0.2962962962962963, 0.3492063492063493, 0.4021164021164023, 0.4550264550264553, 0.5079365079365079, 0.5608465608465609, 0.6137566137566139, 0.666666666666667};
float g[] = { 0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9841269841269842, 0.9047619047619047, 0.8253968253968256, 0.7460317460317465, 0.666666666666667, 0.587301587301587, 0.5079365079365079, 0.4285714285714288, 0.3492063492063493, 0.2698412698412698, 0.1904761904761907, 0.1111111111111116, 0.03174603174603208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float b[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01587301587301582, 0.09523809523809534, 0.1746031746031744, 0.2539682539682535, 0.333333333333333, 0.412698412698413, 0.4920634920634921, 0.5714285714285712, 0.6507936507936507, 0.7301587301587302, 0.8095238095238093, 0.8888888888888884, 0.9682539682539679, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "ocean".
    class Ocean : public ColorMap {
    public:
        Ocean() : ColorMap() {
            init(256);
        }

        Ocean(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904762, 0.09523809523809523, 0.1428571428571428, 0.1904761904761905, 0.2380952380952381, 0.2857142857142857, 0.3333333333333333, 0.3809523809523809, 0.4285714285714285, 0.4761904761904762, 0.5238095238095238, 0.5714285714285714, 0.6190476190476191, 0.6666666666666666, 0.7142857142857143, 0.7619047619047619, 0.8095238095238095, 0.8571428571428571, 0.9047619047619048, 0.9523809523809523, 1};
float g[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02380952380952381, 0.04761904761904762, 0.07142857142857142, 0.09523809523809523, 0.119047619047619, 0.1428571428571428, 0.1666666666666667, 0.1904761904761905, 0.2142857142857143, 0.2380952380952381, 0.2619047619047619, 0.2857142857142857, 0.3095238095238095, 0.3333333333333333, 0.3571428571428572, 0.3809523809523809, 0.4047619047619048, 0.4285714285714285, 0.4523809523809524, 0.4761904761904762, 0.5, 0.5238095238095238, 0.5476190476190477, 0.5714285714285714, 0.5952380952380952, 0.6190476190476191, 0.6428571428571429, 0.6666666666666666, 0.6904761904761905, 0.7142857142857143, 0.7380952380952381, 0.7619047619047619, 0.7857142857142857, 0.8095238095238095, 0.8333333333333334, 0.8571428571428571, 0.8809523809523809, 0.9047619047619048, 0.9285714285714286, 0.9523809523809523, 0.9761904761904762, 1};
float b[] = { 0, 0.01587301587301587, 0.03174603174603174, 0.04761904761904762, 0.06349206349206349, 0.07936507936507936, 0.09523809523809523, 0.1111111111111111, 0.126984126984127, 0.1428571428571428, 0.1587301587301587, 0.1746031746031746, 0.1904761904761905, 0.2063492063492063, 0.2222222222222222, 0.2380952380952381, 0.253968253968254, 0.2698412698412698, 0.2857142857142857, 0.3015873015873016, 0.3174603174603174, 0.3333333333333333, 0.3492063492063492, 0.3650793650793651, 0.3809523809523809, 0.3968253968253968, 0.4126984126984127, 0.4285714285714285, 0.4444444444444444, 0.4603174603174603, 0.4761904761904762, 0.492063492063492, 0.5079365079365079, 0.5238095238095238, 0.5396825396825397, 0.5555555555555556, 0.5714285714285714, 0.5873015873015873, 0.6031746031746031, 0.6190476190476191, 0.6349206349206349, 0.6507936507936508, 0.6666666666666666, 0.6825396825396826, 0.6984126984126984, 0.7142857142857143, 0.7301587301587301, 0.746031746031746, 0.7619047619047619, 0.7777777777777778, 0.7936507936507936, 0.8095238095238095, 0.8253968253968254, 0.8412698412698413, 0.8571428571428571, 0.873015873015873, 0.8888888888888888, 0.9047619047619048, 0.9206349206349206, 0.9365079365079365, 0.9523809523809523, 0.9682539682539683, 0.9841269841269841, 1};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "summer".
    class Summer : public ColorMap {
    public:
        Summer() : ColorMap() {
            init(256);
        }

        Summer(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 0, 0.01587301587301587, 0.03174603174603174, 0.04761904761904762, 0.06349206349206349, 0.07936507936507936, 0.09523809523809523, 0.1111111111111111, 0.126984126984127, 0.1428571428571428, 0.1587301587301587, 0.1746031746031746, 0.1904761904761905, 0.2063492063492063, 0.2222222222222222, 0.2380952380952381, 0.253968253968254, 0.2698412698412698, 0.2857142857142857, 0.3015873015873016, 0.3174603174603174, 0.3333333333333333, 0.3492063492063492, 0.3650793650793651, 0.3809523809523809, 0.3968253968253968, 0.4126984126984127, 0.4285714285714285, 0.4444444444444444, 0.4603174603174603, 0.4761904761904762, 0.492063492063492, 0.5079365079365079, 0.5238095238095238, 0.5396825396825397, 0.5555555555555556, 0.5714285714285714, 0.5873015873015873, 0.6031746031746031, 0.6190476190476191, 0.6349206349206349, 0.6507936507936508, 0.6666666666666666, 0.6825396825396826, 0.6984126984126984, 0.7142857142857143, 0.7301587301587301, 0.746031746031746, 0.7619047619047619, 0.7777777777777778, 0.7936507936507936, 0.8095238095238095, 0.8253968253968254, 0.8412698412698413, 0.8571428571428571, 0.873015873015873, 0.8888888888888888, 0.9047619047619048, 0.9206349206349206, 0.9365079365079365, 0.9523809523809523, 0.9682539682539683, 0.9841269841269841, 1};
float g[] = { 0.5, 0.5079365079365079, 0.5158730158730158, 0.5238095238095238, 0.5317460317460317, 0.5396825396825397, 0.5476190476190477, 0.5555555555555556, 0.5634920634920635, 0.5714285714285714, 0.5793650793650793, 0.5873015873015873, 0.5952380952380952, 0.6031746031746031, 0.6111111111111112, 0.6190476190476191, 0.626984126984127, 0.6349206349206349, 0.6428571428571428, 0.6507936507936508, 0.6587301587301587, 0.6666666666666666, 0.6746031746031746, 0.6825396825396826, 0.6904761904761905, 0.6984126984126984, 0.7063492063492063, 0.7142857142857143, 0.7222222222222222, 0.7301587301587301, 0.7380952380952381, 0.746031746031746, 0.753968253968254, 0.7619047619047619, 0.7698412698412698, 0.7777777777777778, 0.7857142857142857, 0.7936507936507937, 0.8015873015873016, 0.8095238095238095, 0.8174603174603174, 0.8253968253968254, 0.8333333333333333, 0.8412698412698413, 0.8492063492063492, 0.8571428571428572, 0.8650793650793651, 0.873015873015873, 0.8809523809523809, 0.8888888888888888, 0.8968253968253967, 0.9047619047619048, 0.9126984126984127, 0.9206349206349207, 0.9285714285714286, 0.9365079365079365, 0.9444444444444444, 0.9523809523809523, 0.9603174603174602, 0.9682539682539683, 0.9761904761904762, 0.9841269841269842, 0.9920634920634921, 1};
float b[] = { 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "spring".
    class Spring : public ColorMap {
    public:
        Spring() : ColorMap() {
            init(256);
        }

        Spring(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
float g[] = { 0, 0.01587301587301587, 0.03174603174603174, 0.04761904761904762, 0.06349206349206349, 0.07936507936507936, 0.09523809523809523, 0.1111111111111111, 0.126984126984127, 0.1428571428571428, 0.1587301587301587, 0.1746031746031746, 0.1904761904761905, 0.2063492063492063, 0.2222222222222222, 0.2380952380952381, 0.253968253968254, 0.2698412698412698, 0.2857142857142857, 0.3015873015873016, 0.3174603174603174, 0.3333333333333333, 0.3492063492063492, 0.3650793650793651, 0.3809523809523809, 0.3968253968253968, 0.4126984126984127, 0.4285714285714285, 0.4444444444444444, 0.4603174603174603, 0.4761904761904762, 0.492063492063492, 0.5079365079365079, 0.5238095238095238, 0.5396825396825397, 0.5555555555555556, 0.5714285714285714, 0.5873015873015873, 0.6031746031746031, 0.6190476190476191, 0.6349206349206349, 0.6507936507936508, 0.6666666666666666, 0.6825396825396826, 0.6984126984126984, 0.7142857142857143, 0.7301587301587301, 0.746031746031746, 0.7619047619047619, 0.7777777777777778, 0.7936507936507936, 0.8095238095238095, 0.8253968253968254, 0.8412698412698413, 0.8571428571428571, 0.873015873015873, 0.8888888888888888, 0.9047619047619048, 0.9206349206349206, 0.9365079365079365, 0.9523809523809523, 0.9682539682539683, 0.9841269841269841, 1};
float b[] = { 1, 0.9841269841269842, 0.9682539682539683, 0.9523809523809523, 0.9365079365079365, 0.9206349206349207, 0.9047619047619048, 0.8888888888888888, 0.873015873015873, 0.8571428571428572, 0.8412698412698413, 0.8253968253968254, 0.8095238095238095, 0.7936507936507937, 0.7777777777777778, 0.7619047619047619, 0.746031746031746, 0.7301587301587302, 0.7142857142857143, 0.6984126984126984, 0.6825396825396826, 0.6666666666666667, 0.6507936507936508, 0.6349206349206349, 0.6190476190476191, 0.6031746031746033, 0.5873015873015873, 0.5714285714285714, 0.5555555555555556, 0.5396825396825398, 0.5238095238095238, 0.5079365079365079, 0.4920634920634921, 0.4761904761904762, 0.4603174603174603, 0.4444444444444444, 0.4285714285714286, 0.4126984126984127, 0.3968253968253969, 0.3809523809523809, 0.3650793650793651, 0.3492063492063492, 0.3333333333333334, 0.3174603174603174, 0.3015873015873016, 0.2857142857142857, 0.2698412698412699, 0.253968253968254, 0.2380952380952381, 0.2222222222222222, 0.2063492063492064, 0.1904761904761905, 0.1746031746031746, 0.1587301587301587, 0.1428571428571429, 0.126984126984127, 0.1111111111111112, 0.09523809523809523, 0.07936507936507942, 0.06349206349206349, 0.04761904761904767, 0.03174603174603174, 0.01587301587301593, 0};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "cool".
    class Cool : public ColorMap {
    public:
        Cool() : ColorMap() {
            init(256);
        }

        Cool(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 0, 0.01587301587301587, 0.03174603174603174, 0.04761904761904762, 0.06349206349206349, 0.07936507936507936, 0.09523809523809523, 0.1111111111111111, 0.126984126984127, 0.1428571428571428, 0.1587301587301587, 0.1746031746031746, 0.1904761904761905, 0.2063492063492063, 0.2222222222222222, 0.2380952380952381, 0.253968253968254, 0.2698412698412698, 0.2857142857142857, 0.3015873015873016, 0.3174603174603174, 0.3333333333333333, 0.3492063492063492, 0.3650793650793651, 0.3809523809523809, 0.3968253968253968, 0.4126984126984127, 0.4285714285714285, 0.4444444444444444, 0.4603174603174603, 0.4761904761904762, 0.492063492063492, 0.5079365079365079, 0.5238095238095238, 0.5396825396825397, 0.5555555555555556, 0.5714285714285714, 0.5873015873015873, 0.6031746031746031, 0.6190476190476191, 0.6349206349206349, 0.6507936507936508, 0.6666666666666666, 0.6825396825396826, 0.6984126984126984, 0.7142857142857143, 0.7301587301587301, 0.746031746031746, 0.7619047619047619, 0.7777777777777778, 0.7936507936507936, 0.8095238095238095, 0.8253968253968254, 0.8412698412698413, 0.8571428571428571, 0.873015873015873, 0.8888888888888888, 0.9047619047619048, 0.9206349206349206, 0.9365079365079365, 0.9523809523809523, 0.9682539682539683, 0.9841269841269841, 1};
float g[] = { 1, 0.9841269841269842, 0.9682539682539683, 0.9523809523809523, 0.9365079365079365, 0.9206349206349207, 0.9047619047619048, 0.8888888888888888, 0.873015873015873, 0.8571428571428572, 0.8412698412698413, 0.8253968253968254, 0.8095238095238095, 0.7936507936507937, 0.7777777777777778, 0.7619047619047619, 0.746031746031746, 0.7301587301587302, 0.7142857142857143, 0.6984126984126984, 0.6825396825396826, 0.6666666666666667, 0.6507936507936508, 0.6349206349206349, 0.6190476190476191, 0.6031746031746033, 0.5873015873015873, 0.5714285714285714, 0.5555555555555556, 0.5396825396825398, 0.5238095238095238, 0.5079365079365079, 0.4920634920634921, 0.4761904761904762, 0.4603174603174603, 0.4444444444444444, 0.4285714285714286, 0.4126984126984127, 0.3968253968253969, 0.3809523809523809, 0.3650793650793651, 0.3492063492063492, 0.3333333333333334, 0.3174603174603174, 0.3015873015873016, 0.2857142857142857, 0.2698412698412699, 0.253968253968254, 0.2380952380952381, 0.2222222222222222, 0.2063492063492064, 0.1904761904761905, 0.1746031746031746, 0.1587301587301587, 0.1428571428571429, 0.126984126984127, 0.1111111111111112, 0.09523809523809523, 0.07936507936507942, 0.06349206349206349, 0.04761904761904767, 0.03174603174603174, 0.01587301587301593, 0};
float b[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "hsv".
    class HSV : public ColorMap {
    public:
        HSV() : ColorMap() {
            init(256);
        }

        HSV(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9523809523809526, 0.8571428571428568, 0.7619047619047614, 0.6666666666666665, 0.5714285714285716, 0.4761904761904763, 0.3809523809523805, 0.2857142857142856, 0.1904761904761907, 0.0952380952380949, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.09523809523809557, 0.1904761904761905, 0.2857142857142854, 0.3809523809523809, 0.4761904761904765, 0.5714285714285714, 0.6666666666666663, 0.7619047619047619, 0.8571428571428574, 0.9523809523809523, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
float g[] = { 0, 0.09523809523809523, 0.1904761904761905, 0.2857142857142857, 0.3809523809523809, 0.4761904761904762, 0.5714285714285714, 0.6666666666666666, 0.7619047619047619, 0.8571428571428571, 0.9523809523809523, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9523809523809526, 0.8571428571428577, 0.7619047619047619, 0.6666666666666665, 0.5714285714285716, 0.4761904761904767, 0.3809523809523814, 0.2857142857142856, 0.1904761904761907, 0.09523809523809579, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float b[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.09523809523809523, 0.1904761904761905, 0.2857142857142857, 0.3809523809523809, 0.4761904761904762, 0.5714285714285714, 0.6666666666666666, 0.7619047619047619, 0.8571428571428571, 0.9523809523809523, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9523809523809526, 0.8571428571428577, 0.7619047619047614, 0.6666666666666665, 0.5714285714285716, 0.4761904761904767, 0.3809523809523805, 0.2857142857142856, 0.1904761904761907, 0.09523809523809579, 0};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "pink".
    class Pink : public ColorMap {
    public:
        Pink() : ColorMap() {
            init(256);
        }

        Pink(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 0, 0.1571348402636772, 0.2222222222222222, 0.2721655269759087, 0.3142696805273544, 0.3513641844631533, 0.3849001794597505, 0.415739709641549, 0.4444444444444444, 0.4714045207910317, 0.4969039949999532, 0.5211573066470477, 0.5443310539518174, 0.5665577237325317, 0.5879447357921312, 0.6085806194501846, 0.6285393610547089, 0.6478835438717, 0.6666666666666666, 0.6849348892187751, 0.7027283689263065, 0.7200822998230956, 0.7370277311900888, 0.753592220347252, 0.7663560447348133, 0.7732293307186413, 0.7800420555749596, 0.7867957924694432, 0.7934920476158722, 0.8001322641986387, 0.8067178260046388, 0.8132500607904444, 0.8197302434079591, 0.8261595987094034, 0.8325393042503717, 0.8388704928078611, 0.8451542547285166, 0.8513916401208816, 0.8575836609041332, 0.8637312927246217, 0.8698354767504924, 0.8758971213537393, 0.8819171036881968, 0.8878962711712378, 0.8938354428762595, 0.8997354108424372, 0.9055969413076769, 0.9114207758701963, 0.9172076325837248, 0.9229582069908971, 0.9286731730990523, 0.9343531843023135, 0.9399988742535192, 0.9456108576893002, 0.9511897312113418, 0.9567360740266436, 0.9622504486493763, 0.9677334015667416, 0.9731854638710686, 0.9786071518602129, 0.9839989676081821, 0.9893613995077727, 0.9946949227868761, 1};
float g[] = { 0, 0.1028688999747279, 0.1454785934906616, 0.1781741612749496, 0.2057377999494559, 0.2300218531141181, 0.2519763153394848, 0.2721655269759087, 0.2909571869813232, 0.3086066999241838, 0.3253000243161777, 0.3411775438127727, 0.3563483225498992, 0.3708990935094579, 0.3849001794597505, 0.3984095364447979, 0.4114755998989117, 0.4241393401869012, 0.4364357804719847, 0.4483951394230328, 0.4600437062282361, 0.4714045207910317, 0.4824979096371639, 0.4933419132673033, 0.5091750772173156, 0.5328701692569688, 0.5555555555555556, 0.5773502691896257, 0.5983516452371671, 0.6186404847588913, 0.6382847385042254, 0.6573421981221795, 0.6758625033664688, 0.6938886664887108, 0.7114582486036499, 0.7286042804780002, 0.7453559924999299, 0.7617394000445604, 0.7777777777777778, 0.7934920476158723, 0.8089010988089465, 0.8240220541217402, 0.8388704928078611, 0.8534606386520677, 0.8678055195451838, 0.8819171036881968, 0.8958064164776166, 0.9094836413191612, 0.9172076325837248, 0.9229582069908971, 0.9286731730990523, 0.9343531843023135, 0.9399988742535192, 0.9456108576893002, 0.9511897312113418, 0.9567360740266436, 0.9622504486493763, 0.9677334015667416, 0.9731854638710686, 0.9786071518602129, 0.9839989676081821, 0.9893613995077727, 0.9946949227868761, 1};
float b[] = { 0, 0.1028688999747279, 0.1454785934906616, 0.1781741612749496, 0.2057377999494559, 0.2300218531141181, 0.2519763153394848, 0.2721655269759087, 0.2909571869813232, 0.3086066999241838, 0.3253000243161777, 0.3411775438127727, 0.3563483225498992, 0.3708990935094579, 0.3849001794597505, 0.3984095364447979, 0.4114755998989117, 0.4241393401869012, 0.4364357804719847, 0.4483951394230328, 0.4600437062282361, 0.4714045207910317, 0.4824979096371639, 0.4933419132673033, 0.5039526306789697, 0.5143444998736397, 0.5245305283129621, 0.5345224838248488, 0.5443310539518174, 0.5539659798925444, 0.563436169819011, 0.5727497953228163, 0.5819143739626463, 0.5909368402852788, 0.5998236072282915, 0.6085806194501846, 0.6172133998483676, 0.6257270902992705, 0.6341264874742278, 0.642416074439621, 0.6506000486323554, 0.6586823467062358, 0.6666666666666666, 0.6745564876468501, 0.6823550876255453, 0.6900655593423541, 0.6976908246297114, 0.7052336473499384, 0.7237468644557459, 0.7453559924999298, 0.7663560447348133, 0.7867957924694432, 0.8067178260046388, 0.8261595987094034, 0.8451542547285166, 0.8637312927246217, 0.8819171036881968, 0.8997354108424372, 0.9172076325837248, 0.9343531843023135, 0.9511897312113418, 0.9677334015667416, 0.9839989676081821, 1};
            Mat X = linspace(0,1,64);
            this->_lut = ColorMap::linear_colormap(X,
                Mat(64,1, CV_32FC1, r).clone(), // red
                Mat(64,1, CV_32FC1, g).clone(), // green
                Mat(64,1, CV_32FC1, b).clone(), // blue
                n); // number of sample points
        }
    };

    // Equals the GNU Octave colormap "hot".
    class Hot : public ColorMap {
    public:
        Hot() : ColorMap() {
            init(256);
        }

        Hot(int n) : ColorMap() {
            init(n);
        }

        void init(int n) {
float r[] = { 0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
float g[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603163, 0.0714285714285714, 0.1111111111111112, 0.1507936507936507, 0.1904761904761905, 0.23015873015873, 0.2698412698412698, 0.3095238095238093, 0.3492063492063491, 0.3888888888888888, 0.4285714285714284, 0.4682539682539679, 0.5079365079365079, 0.5476190476190477, 0.5873015873015872, 0.6269841269841268, 0.6666666666666665, 0.7063492063492065, 0.746031746031746, 0.7857142857142856, 0.8253968253968254, 0.8650793650793651, 0.9047619047619047, 0.9444444444444442, 0.984126984126984, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
float b[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904745, 0.1269841269841265, 0.2063492063492056, 0.2857142857142856, 0.3650793650793656, 0.4444444444444446, 0.5238095238095237, 0.6031746031746028, 0.6825396825396828, 0.7619047619047619, 0.8412698412698409, 0.92063492063492, 1};
Mat X = linspace(0,1,64);
this->_lut = ColorMap::linear_colormap(X,
Mat(64,1, CV_32FC1, r).clone(), // red
Mat(64,1, CV_32FC1, g).clone(), // green
Mat(64,1, CV_32FC1, b).clone(), // blue
n); // number of sample points
}
};

void ColorMap::operator()(InputArray _src, OutputArray _dst) const
{
if(_lut.total() != 256)
CV_Error(Error::StsAssert, "cv::LUT only supports tables of size 256.");
Mat src = _src.getMat();
// Return the original matrix if a wrong type is given (would failing loudly be better here?)
if(src.type() != CV_8UC1 && src.type() != CV_8UC3)
{
src.copyTo(_dst);
return;
}
// Turn a BGR matrix into its grayscale representation.
if(src.type() == CV_8UC3)
cvtColor(src.clone(), src, COLOR_BGR2GRAY);
cvtColor(src.clone(), src, COLOR_GRAY2BGR);
// Apply the ColorMap.
LUT(src, _lut, _dst);
}

Mat ColorMap::linear_colormap(InputArray X,
InputArray r, InputArray g, InputArray b,
InputArray xi) {
Mat lut, lut8;
Mat planes[] = {
interp1(X, b, xi),
interp1(X, g, xi),
interp1(X, r, xi)};
merge(planes, 3, lut);
lut.convertTo(lut8, CV_8U, 255.);
return lut8;
}

}

void applyColorMap(InputArray src, OutputArray dst, int colormap)
{
colormap::ColorMap* cm =
colormap == COLORMAP_AUTUMN ? (colormap::ColorMap*)(new colormap::Autumn) :
colormap == COLORMAP_BONE ? (colormap::ColorMap*)(new colormap::Bone) :
colormap == COLORMAP_COOL ? (colormap::ColorMap*)(new colormap::Cool) :
colormap == COLORMAP_HOT ? (colormap::ColorMap*)(new colormap::Hot) :
colormap == COLORMAP_HSV ? (colormap::ColorMap*)(new colormap::HSV) :
colormap == COLORMAP_JET ? (colormap::ColorMap*)(new colormap::Jet) :
colormap == COLORMAP_OCEAN ? (colormap::ColorMap*)(new colormap::Ocean) :
colormap == COLORMAP_PINK ? (colormap::ColorMap*)(new colormap::Pink) :
colormap == COLORMAP_RAINBOW ? (colormap::ColorMap*)(new colormap::Rainbow) :
colormap == COLORMAP_SPRING ? (colormap::ColorMap*)(new colormap::Spring) :
colormap == COLORMAP_SUMMER ? (colormap::ColorMap*)(new colormap::Summer) :
colormap == COLORMAP_WINTER ? (colormap::ColorMap*)(new colormap::Winter) : 0;

if( !cm )
CV_Error( Error::StsBadArg, "Unknown colormap id; use one of COLORMAP_*");

(*cm)(src, dst);

delete cm;
}
}
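The dispatch above is the whole public surface: applyColorMap() picks one of the static ColorMap subclasses, runs the 256-entry LUT over the input, and frees the object. A minimal usage sketch, assuming applyColorMap() remains exposed after this reshuffle (the file paths here are placeholders):

// Minimal sketch: map a grayscale image through the Jet colormap.
#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (gray.empty())
        return 1;
    cv::Mat colored;
    cv::applyColorMap(gray, colored, cv::COLORMAP_JET); // dispatches to colormap::Jet
    cv::imwrite("output.png", colored);
    return 0;
}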
@ -1,134 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/contrib/hybridtracker.hpp"

using namespace cv;

CvMeanShiftTracker::CvMeanShiftTracker(CvMeanShiftTrackerParams _params) : params(_params)
{
}

CvMeanShiftTracker::~CvMeanShiftTracker()
{
}

void CvMeanShiftTracker::newTrackingWindow(Mat image, Rect selection)
{
hist.release();
int channels[] = { 0, 0, 1, 1 };
float hrange[] = { 0, 180 };
float srange[] = { 0, 1 };
const float* ranges[] = {hrange, srange};

cvtColor(image, hsv, COLOR_BGR2HSV);
inRange(hsv, Scalar(0, 30, MIN(10, 256)), Scalar(180, 256, MAX(10, 256)), mask);

hue.create(hsv.size(), CV_8UC2);
mixChannels(&hsv, 1, &hue, 1, channels, 2);

Mat roi(hue, selection);
Mat mskroi(mask, selection);
int ch[] = {0, 1};
int chsize[] = {32, 32};
calcHist(&roi, 1, ch, mskroi, hist, 1, chsize, ranges);
normalize(hist, hist, 0, 255, CV_MINMAX);

prev_trackwindow = selection;
}

RotatedRect CvMeanShiftTracker::updateTrackingWindow(Mat image)
{
int channels[] = { 0, 0, 1, 1 };
float hrange[] = { 0, 180 };
float srange[] = { 0, 1 };
const float* ranges[] = {hrange, srange};

cvtColor(image, hsv, COLOR_BGR2HSV);
inRange(hsv, Scalar(0, 30, MIN(10, 256)), Scalar(180, 256, MAX(10, 256)), mask);
hue.create(hsv.size(), CV_8UC2);
mixChannels(&hsv, 1, &hue, 1, channels, 2);
int ch[] = {0, 1};
calcBackProject(&hue, 1, ch, hist, backproj, ranges);
backproj &= mask;

prev_trackbox = CamShift(backproj, prev_trackwindow, TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5) / 6;
prev_trackwindow = Rect(prev_trackwindow.x - r, prev_trackwindow.y - r, prev_trackwindow.x + r,
prev_trackwindow.y + r) & Rect(0, 0, cols, rows);

prev_center.x = (float)(prev_trackwindow.x + prev_trackwindow.width / 2);
prev_center.y = (float)(prev_trackwindow.y + prev_trackwindow.height / 2);

#ifdef DEBUG_HYTRACKER
ellipse(image, prev_trackbox, Scalar(0, 0, 255), 1, CV_AA);
#endif

return prev_trackbox;
}

Mat CvMeanShiftTracker::getHistogramProjection(int type)
{
Mat ms_backproj_f(backproj.size(), type);
backproj.convertTo(ms_backproj_f, type);
return ms_backproj_f;
}

void CvMeanShiftTracker::setTrackingWindow(Rect window)
{
prev_trackwindow = window;
}

Rect CvMeanShiftTracker::getTrackingWindow()
{
return prev_trackwindow;
}

RotatedRect CvMeanShiftTracker::getTrackingEllipse()
{
return prev_trackbox;
}

Point2f CvMeanShiftTracker::getTrackingCenter()
{
return prev_center;
}
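Nothing in CvMeanShiftTracker depends on contrib internals; the update step is plain CamShift over a hue/saturation back projection, so the same behaviour can be rebuilt on the retained core/imgproc/video API. A hedged sketch of one update (the function and variable names are illustrative; `hist` is a 2D hue/saturation histogram computed beforehand, and the saturation range is widened to 0..256, the usual choice for 8-bit data):

// Rough stand-in for updateTrackingWindow(), using only retained modules.
#include <opencv2/opencv.hpp>

cv::RotatedRect trackOnce(const cv::Mat& frameBGR, cv::Rect& window, const cv::Mat& hist)
{
    cv::Mat hsv, mask, backproj;
    cv::cvtColor(frameBGR, hsv, cv::COLOR_BGR2HSV);
    cv::inRange(hsv, cv::Scalar(0, 30, 10), cv::Scalar(180, 256, 256), mask);

    // Back-project the histogram directly on the HSV image; channel
    // indices {0, 1} select H and S, so no mixChannels() step is needed.
    float hrange[] = { 0, 180 }, srange[] = { 0, 256 };
    const float* ranges[] = { hrange, srange };
    int ch[] = { 0, 1 };
    cv::calcBackProject(&hsv, 1, ch, hist, backproj, ranges);
    backproj &= mask;

    return cv::CamShift(backproj, window,
                        cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
}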
@ -1,43 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
@ -1,877 +0,0 @@
#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
#include "opencv2/contrib/detection_based_tracker.hpp"
#include "opencv2/core/utility.hpp"

#include <pthread.h>

#if defined(DEBUG) || defined(_DEBUG)
#undef DEBUGLOGS
#define DEBUGLOGS 1
#endif

#ifndef DEBUGLOGS
#define DEBUGLOGS 0
#endif

#ifdef ANDROID
#include <android/log.h>
#define LOG_TAG "OBJECT_DETECTOR"
#define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define LOGI0(...) ((void)__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
#define LOGW0(...) ((void)__android_log_print(ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__))
#define LOGE0(...) ((void)__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))
#else

#include <stdio.h>

#define LOGD0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGI0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGW0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#define LOGE0(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
#endif

#if DEBUGLOGS
#define LOGD(_str, ...) LOGD0(_str , ## __VA_ARGS__)
#define LOGI(_str, ...) LOGI0(_str , ## __VA_ARGS__)
#define LOGW(_str, ...) LOGW0(_str , ## __VA_ARGS__)
#define LOGE(_str, ...) LOGE0(_str , ## __VA_ARGS__)
#else
#define LOGD(...) do{} while(0)
#define LOGI(...) do{} while(0)
#define LOGW(...) do{} while(0)
#define LOGE(...) do{} while(0)
#endif


using namespace cv;

static inline cv::Point2f centerRect(const cv::Rect& r)
{
return cv::Point2f(r.x+((float)r.width)/2, r.y+((float)r.height)/2);
}

static inline cv::Rect scale_rect(const cv::Rect& r, float scale)
{
cv::Point2f m=centerRect(r);
float width = r.width * scale;
float height = r.height * scale;
int x=cvRound(m.x - width/2);
int y=cvRound(m.y - height/2);

return cv::Rect(x, y, cvRound(width), cvRound(height));
}

namespace cv
{
void* workcycleObjectDetectorFunction(void* p);
}

class cv::DetectionBasedTracker::SeparateDetectionWork
{
public:
SeparateDetectionWork(cv::DetectionBasedTracker& _detectionBasedTracker, cv::Ptr<DetectionBasedTracker::IDetector> _detector);
virtual ~SeparateDetectionWork();
bool communicateWithDetectingThread(const Mat& imageGray, std::vector<Rect>& rectsWhereRegions);
bool run();
void stop();
void resetTracking();

inline bool isWorking()
{
return (stateThread==STATE_THREAD_WORKING_SLEEPING) || (stateThread==STATE_THREAD_WORKING_WITH_IMAGE);
}
inline void lock()
{
pthread_mutex_lock(&mutex);
}
inline void unlock()
{
pthread_mutex_unlock(&mutex);
}

protected:

DetectionBasedTracker& detectionBasedTracker;
cv::Ptr<DetectionBasedTracker::IDetector> cascadeInThread;

pthread_t second_workthread;
pthread_mutex_t mutex;
pthread_cond_t objectDetectorRun;
pthread_cond_t objectDetectorThreadStartStop;

std::vector<cv::Rect> resultDetect;
volatile bool isObjectDetectingReady;
volatile bool shouldObjectDetectingResultsBeForgot;

enum StateSeparatedThread {
STATE_THREAD_STOPPED=0,
STATE_THREAD_WORKING_SLEEPING,
STATE_THREAD_WORKING_WITH_IMAGE,
STATE_THREAD_WORKING,
STATE_THREAD_STOPPING
};
volatile StateSeparatedThread stateThread;

cv::Mat imageSeparateDetecting;

void workcycleObjectDetector();
friend void* workcycleObjectDetectorFunction(void* p);

long long timeWhenDetectingThreadStartedWork;
};

cv::DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork(DetectionBasedTracker& _detectionBasedTracker, cv::Ptr<DetectionBasedTracker::IDetector> _detector)
:detectionBasedTracker(_detectionBasedTracker),
cascadeInThread(),
isObjectDetectingReady(false),
shouldObjectDetectingResultsBeForgot(false),
stateThread(STATE_THREAD_STOPPED),
timeWhenDetectingThreadStartedWork(-1)
{
CV_Assert(_detector);

cascadeInThread = _detector;

int res=0;
res=pthread_mutex_init(&mutex, NULL);//TODO: should be attributes?
if (res) {
LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_mutex_init(&mutex, NULL) is %d", res);
throw(std::exception());
}
res=pthread_cond_init (&objectDetectorRun, NULL);
if (res) {
LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_cond_init(&objectDetectorRun, NULL) is %d", res);
pthread_mutex_destroy(&mutex);
throw(std::exception());
}
res=pthread_cond_init (&objectDetectorThreadStartStop, NULL);
if (res) {
LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_cond_init(&objectDetectorThreadStartStop, NULL) is %d", res);
pthread_cond_destroy(&objectDetectorRun);
pthread_mutex_destroy(&mutex);
throw(std::exception());
}
}

cv::DetectionBasedTracker::SeparateDetectionWork::~SeparateDetectionWork()
{
if(stateThread!=STATE_THREAD_STOPPED) {
LOGE("\n\n\nATTENTION!!! dangerous algorithm error: destructor DetectionBasedTracker::SeparateDetectionWork::~SeparateDetectionWork is called before stopping the workthread");
}

pthread_cond_destroy(&objectDetectorThreadStartStop);
pthread_cond_destroy(&objectDetectorRun);
pthread_mutex_destroy(&mutex);
}
bool cv::DetectionBasedTracker::SeparateDetectionWork::run()
{
LOGD("DetectionBasedTracker::SeparateDetectionWork::run() --- start");
pthread_mutex_lock(&mutex);
if (stateThread != STATE_THREAD_STOPPED) {
LOGE("DetectionBasedTracker::SeparateDetectionWork::run is called while the previous run is not stopped");
pthread_mutex_unlock(&mutex);
return false;
}
stateThread=STATE_THREAD_WORKING_SLEEPING;
pthread_create(&second_workthread, NULL, workcycleObjectDetectorFunction, (void*)this); //TODO: add attributes?
pthread_cond_wait(&objectDetectorThreadStartStop, &mutex);
pthread_mutex_unlock(&mutex);
LOGD("DetectionBasedTracker::SeparateDetectionWork::run --- end");
return true;
}

#define CATCH_ALL_AND_LOG(_block) \
do { \
try { \
_block; \
break; \
} \
catch(cv::Exception& e) { \
LOGE0("\n %s: ERROR: OpenCV Exception caught: \n'%s'\n\n", CV_Func, e.what()); \
} catch(std::exception& e) { \
LOGE0("\n %s: ERROR: Exception caught: \n'%s'\n\n", CV_Func, e.what()); \
} catch(...) { \
LOGE0("\n %s: ERROR: UNKNOWN Exception caught\n\n", CV_Func); \
} \
} while(0)

void* cv::workcycleObjectDetectorFunction(void* p)
{
CATCH_ALL_AND_LOG({ ((cv::DetectionBasedTracker::SeparateDetectionWork*)p)->workcycleObjectDetector(); });
try{
((cv::DetectionBasedTracker::SeparateDetectionWork*)p)->stateThread = cv::DetectionBasedTracker::SeparateDetectionWork::STATE_THREAD_STOPPED;
} catch(...) {
LOGE0("DetectionBasedTracker: workcycleObjectDetectorFunction: ERROR concerning pointer, received as the function parameter");
}
return NULL;
}

void cv::DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector()
{
static double freq = getTickFrequency();
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- start");
std::vector<Rect> objects;

CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
pthread_mutex_lock(&mutex);
{
pthread_cond_signal(&objectDetectorThreadStartStop);

LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- before waiting");
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
pthread_cond_wait(&objectDetectorRun, &mutex);
if (isWorking()) {
stateThread=STATE_THREAD_WORKING_WITH_IMAGE;
}
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- after waiting");
}
pthread_mutex_unlock(&mutex);

bool isFirstStep=true;

isObjectDetectingReady=false;

while(isWorking())
{
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- next step");

if (! isFirstStep) {
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- before waiting");
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);

pthread_mutex_lock(&mutex);
if (!isWorking()) {//it is a rare case, but may cause a crash
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- go out from the workcycle from inner part of lock just before waiting");
pthread_mutex_unlock(&mutex);
break;
}
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
pthread_cond_wait(&objectDetectorRun, &mutex);
if (isWorking()) {
stateThread=STATE_THREAD_WORKING_WITH_IMAGE;
}
pthread_mutex_unlock(&mutex);

LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- after waiting");
} else {
isFirstStep=false;
}

if (!isWorking()) {
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- go out from the workcycle just after waiting");
break;
}


if (imageSeparateDetecting.empty()) {
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- imageSeparateDetecting is empty, continue");
continue;
}
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- start handling imageSeparateDetecting, img.size=%dx%d, img.data=0x%p",
imageSeparateDetecting.size().width, imageSeparateDetecting.size().height, (void*)imageSeparateDetecting.data);


int64 t1_detect=getTickCount();

cascadeInThread->detect(imageSeparateDetecting, objects);

/*cascadeInThread.detectMultiScale( imageSeparateDetecting, objects,
detectionBasedTracker.parameters.scaleFactor, detectionBasedTracker.parameters.minNeighbors, 0
|CV_HAAR_SCALE_IMAGE
,
min_objectSize,
max_objectSize
);
*/

LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- end handling imageSeparateDetecting");

if (!isWorking()) {
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- go out from the workcycle just after detecting");
break;
}

int64 t2_detect = getTickCount();
int64 dt_detect = t2_detect-t1_detect;
double dt_detect_ms=((double)dt_detect)/freq * 1000.0;
(void)(dt_detect_ms);

LOGI("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- objects num==%d, t_ms=%.4f", (int)objects.size(), dt_detect_ms);

pthread_mutex_lock(&mutex);
if (!shouldObjectDetectingResultsBeForgot) {
resultDetect=objects;
isObjectDetectingReady=true;
} else { //shouldObjectDetectingResultsBeForgot==true
resultDetect.clear();
isObjectDetectingReady=false;
shouldObjectDetectingResultsBeForgot=false;
}
if(isWorking()) {
stateThread=STATE_THREAD_WORKING_SLEEPING;
}
pthread_mutex_unlock(&mutex);

objects.clear();
}// while(isWorking())


pthread_mutex_lock(&mutex);

stateThread=STATE_THREAD_STOPPED;

isObjectDetectingReady=false;
shouldObjectDetectingResultsBeForgot=false;

pthread_cond_signal(&objectDetectorThreadStartStop);

pthread_mutex_unlock(&mutex);

LOGI("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector: Returning");
}

void cv::DetectionBasedTracker::SeparateDetectionWork::stop()
{
//FIXME: TODO: should add quickStop functionality
pthread_mutex_lock(&mutex);
if (!isWorking()) {
pthread_mutex_unlock(&mutex);
LOGE("DetectionBasedTracker::SeparateDetectionWork::stop is called but the detecting thread is not active");
return;
}
stateThread=STATE_THREAD_STOPPING;
LOGD("DetectionBasedTracker::SeparateDetectionWork::stop: before going to sleep to wait for the signal from the workthread");
pthread_cond_signal(&objectDetectorRun);
pthread_cond_wait(&objectDetectorThreadStartStop, &mutex);
LOGD("DetectionBasedTracker::SeparateDetectionWork::stop: after receiving the signal from the workthread, stateThread=%d", (int)stateThread);
pthread_mutex_unlock(&mutex);
}

void cv::DetectionBasedTracker::SeparateDetectionWork::resetTracking()
{
LOGD("DetectionBasedTracker::SeparateDetectionWork::resetTracking");
pthread_mutex_lock(&mutex);

if (stateThread == STATE_THREAD_WORKING_WITH_IMAGE) {
LOGD("DetectionBasedTracker::SeparateDetectionWork::resetTracking: since workthread is detecting objects at the moment, we should make cascadeInThread stop detecting and forget the detecting results");
shouldObjectDetectingResultsBeForgot=true;
//cascadeInThread.setStopFlag();//FIXME: TODO: this feature also should be contributed to OpenCV
} else {
LOGD("DetectionBasedTracker::SeparateDetectionWork::resetTracking: since workthread is NOT detecting objects at the moment, we should NOT make any additional actions");
}

resultDetect.clear();
isObjectDetectingReady=false;


pthread_mutex_unlock(&mutex);

}

bool cv::DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread(const Mat& imageGray, std::vector<Rect>& rectsWhereRegions)
{
static double freq = getTickFrequency();

bool shouldCommunicateWithDetectingThread = (stateThread==STATE_THREAD_WORKING_SLEEPING);
LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: shouldCommunicateWithDetectingThread=%d", (shouldCommunicateWithDetectingThread?1:0));

if (!shouldCommunicateWithDetectingThread) {
return false;
}

bool shouldHandleResult = false;
pthread_mutex_lock(&mutex);

if (isObjectDetectingReady) {
shouldHandleResult=true;
rectsWhereRegions = resultDetect;
isObjectDetectingReady=false;

double lastBigDetectionDuration = 1000.0 * (((double)(getTickCount() - timeWhenDetectingThreadStartedWork )) / freq);
(void)(lastBigDetectionDuration);
LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: lastBigDetectionDuration=%f ms", (double)lastBigDetectionDuration);
}

bool shouldSendNewDataToWorkThread = true;
if (timeWhenDetectingThreadStartedWork > 0) {
double time_from_previous_launch_in_ms=1000.0 * (((double)(getTickCount() - timeWhenDetectingThreadStartedWork )) / freq); //the same formula as for lastBigDetectionDuration
shouldSendNewDataToWorkThread = (time_from_previous_launch_in_ms >= detectionBasedTracker.parameters.minDetectionPeriod);
LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: shouldSendNewDataToWorkThread was 1, now it is %d, since time_from_previous_launch_in_ms=%.2f, minDetectionPeriod=%d",
(shouldSendNewDataToWorkThread?1:0), time_from_previous_launch_in_ms, detectionBasedTracker.parameters.minDetectionPeriod);
}

if (shouldSendNewDataToWorkThread) {

imageSeparateDetecting.create(imageGray.size(), CV_8UC1);

imageGray.copyTo(imageSeparateDetecting); // may change the imageSeparateDetecting pointer, but it should not


timeWhenDetectingThreadStartedWork = getTickCount();

pthread_cond_signal(&objectDetectorRun);
}

pthread_mutex_unlock(&mutex);
LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: result: shouldHandleResult=%d", (shouldHandleResult?1:0));

return shouldHandleResult;
}

cv::DetectionBasedTracker::Parameters::Parameters()
{
maxTrackLifetime=5;
minDetectionPeriod=0;
}

cv::DetectionBasedTracker::InnerParameters::InnerParameters()
{
numLastPositionsToTrack=4;
numStepsToWaitBeforeFirstShow=6;
numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown=3;
numStepsToShowWithoutDetecting=3;

coeffTrackingWindowSize=2.0;
coeffObjectSizeToTrack=0.85;
coeffObjectSpeedUsingInPrediction=0.8;

}

cv::DetectionBasedTracker::DetectionBasedTracker(cv::Ptr<IDetector> mainDetector, cv::Ptr<IDetector> trackingDetector, const Parameters& params)
:separateDetectionWork(),
parameters(params),
innerParameters(),
numTrackedSteps(0),
cascadeForTracking(trackingDetector)
{
CV_Assert( (params.maxTrackLifetime >= 0)
// && mainDetector
&& trackingDetector );

if (mainDetector) {
separateDetectionWork.reset(new SeparateDetectionWork(*this, mainDetector));
}

weightsPositionsSmoothing.push_back(1);
weightsSizesSmoothing.push_back(0.5);
weightsSizesSmoothing.push_back(0.3);
weightsSizesSmoothing.push_back(0.2);
}

cv::DetectionBasedTracker::~DetectionBasedTracker()
{
}

void DetectionBasedTracker::process(const Mat& imageGray)
{
CV_Assert(imageGray.type()==CV_8UC1);

if ( separateDetectionWork && !separateDetectionWork->isWorking() ) {
separateDetectionWork->run();
}

static double freq = getTickFrequency();
static long long time_when_last_call_started=getTickCount();

{
double delta_time_from_prev_call=1000.0 * (((double)(getTickCount() - time_when_last_call_started)) / freq);
(void)(delta_time_from_prev_call);
LOGD("DetectionBasedTracker::process: time from the previous call is %f ms", (double)delta_time_from_prev_call);
time_when_last_call_started=getTickCount();
}

Mat imageDetect=imageGray;

std::vector<Rect> rectsWhereRegions;
bool shouldHandleResult=false;
if (separateDetectionWork) {
shouldHandleResult = separateDetectionWork->communicateWithDetectingThread(imageGray, rectsWhereRegions);
}

if (shouldHandleResult) {
LOGD("DetectionBasedTracker::process: taking rectsWhereRegions from resultDetect");
} else {
LOGD("DetectionBasedTracker::process: taking rectsWhereRegions from previous positions");
for(size_t i = 0; i < trackedObjects.size(); i++) {
int n = trackedObjects[i].lastPositions.size();
CV_Assert(n > 0);

Rect r = trackedObjects[i].lastPositions[n-1];
if(r.area() == 0) {
LOGE("DetectionBasedTracker::process: ERROR: ATTENTION: strange algorithm's behavior: trackedObjects[i].rect() is empty");
continue;
}

//correction by speed of rectangle
if (n > 1) {
Point2f center = centerRect(r);
Point2f center_prev = centerRect(trackedObjects[i].lastPositions[n-2]);
Point2f shift = (center - center_prev) * innerParameters.coeffObjectSpeedUsingInPrediction;

r.x += cvRound(shift.x);
r.y += cvRound(shift.y);
}


rectsWhereRegions.push_back(r);
}
}
LOGI("DetectionBasedTracker::process: tracked objects num==%d", (int)trackedObjects.size());

std::vector<Rect> detectedObjectsInRegions;

LOGD("DetectionBasedTracker::process: rectsWhereRegions.size()=%d", (int)rectsWhereRegions.size());
for(size_t i=0; i < rectsWhereRegions.size(); i++) {
Rect r = rectsWhereRegions[i];

detectInRegion(imageDetect, r, detectedObjectsInRegions);
}
LOGD("DetectionBasedTracker::process: detectedObjectsInRegions.size()=%d", (int)detectedObjectsInRegions.size());

updateTrackedObjects(detectedObjectsInRegions);
}

void cv::DetectionBasedTracker::getObjects(std::vector<cv::Rect>& result) const
{
result.clear();

for(size_t i=0; i < trackedObjects.size(); i++) {
Rect r=calcTrackedObjectPositionToShow(i);
if (r.area()==0) {
continue;
}
result.push_back(r);
LOGD("DetectionBasedTracker::process: found an object with SIZE %d x %d, rect={%d, %d, %d x %d}", r.width, r.height, r.x, r.y, r.width, r.height);
}
}

void cv::DetectionBasedTracker::getObjects(std::vector<Object>& result) const
{
result.clear();

for(size_t i=0; i < trackedObjects.size(); i++) {
Rect r=calcTrackedObjectPositionToShow(i);
if (r.area()==0) {
continue;
}
result.push_back(Object(r, trackedObjects[i].id));
LOGD("DetectionBasedTracker::process: found an object with SIZE %d x %d, rect={%d, %d, %d x %d}", r.width, r.height, r.x, r.y, r.width, r.height);
}
}
void cv::DetectionBasedTracker::getObjects(std::vector<ExtObject>& result) const
{
result.clear();

for(size_t i=0; i < trackedObjects.size(); i++) {
ObjectStatus status;
Rect r=calcTrackedObjectPositionToShow(i, status);
result.push_back(ExtObject(trackedObjects[i].id, r, status));
LOGD("DetectionBasedTracker::process: found an object with SIZE %d x %d, rect={%d, %d, %d x %d}, status = %d", r.width, r.height, r.x, r.y, r.width, r.height, (int)status);
}
}

bool cv::DetectionBasedTracker::run()
{
if (separateDetectionWork) {
return separateDetectionWork->run();
}
return false;
}

void cv::DetectionBasedTracker::stop()
{
if (separateDetectionWork) {
separateDetectionWork->stop();
}
}

void cv::DetectionBasedTracker::resetTracking()
{
if (separateDetectionWork) {
separateDetectionWork->resetTracking();
}
trackedObjects.clear();
}

void cv::DetectionBasedTracker::updateTrackedObjects(const std::vector<Rect>& detectedObjects)
{
enum {
NEW_RECTANGLE=-1,
INTERSECTED_RECTANGLE=-2
};

int N1=trackedObjects.size();
int N2=detectedObjects.size();
LOGD("DetectionBasedTracker::updateTrackedObjects: N1=%d, N2=%d", N1, N2);

for(int i=0; i < N1; i++) {
trackedObjects[i].numDetectedFrames++;
}

std::vector<int> correspondence(detectedObjects.size(), NEW_RECTANGLE);

for(int i=0; i < N1; i++) {
LOGD("DetectionBasedTracker::updateTrackedObjects: i=%d", i);
TrackedObject& curObject=trackedObjects[i];

int bestIndex=-1;
int bestArea=-1;

int numpositions=curObject.lastPositions.size();
CV_Assert(numpositions > 0);
Rect prevRect=curObject.lastPositions[numpositions-1];
LOGD("DetectionBasedTracker::updateTrackedObjects: prevRect[%d]={%d, %d, %d x %d}", i, prevRect.x, prevRect.y, prevRect.width, prevRect.height);

for(int j=0; j < N2; j++) {
LOGD("DetectionBasedTracker::updateTrackedObjects: j=%d", j);
if (correspondence[j] >= 0) {
LOGD("DetectionBasedTracker::updateTrackedObjects: j=%d is rejected, because it has correspondence=%d", j, correspondence[j]);
continue;
}
if (correspondence[j] !=NEW_RECTANGLE) {
LOGD("DetectionBasedTracker::updateTrackedObjects: j=%d is rejected, because it is intersected with another rectangle", j);
continue;
}
LOGD("DetectionBasedTracker::updateTrackedObjects: detectedObjects[%d]={%d, %d, %d x %d}",
j, detectedObjects[j].x, detectedObjects[j].y, detectedObjects[j].width, detectedObjects[j].height);

Rect r=prevRect & detectedObjects[j];
if ( (r.width > 0) && (r.height > 0) ) {
LOGD("DetectionBasedTracker::updateTrackedObjects: There is intersection between prevRect and detectedRect, r={%d, %d, %d x %d}",
r.x, r.y, r.width, r.height);
correspondence[j]=INTERSECTED_RECTANGLE;

if ( r.area() > bestArea) {
LOGD("DetectionBasedTracker::updateTrackedObjects: The area of intersection is %d, it is better than bestArea=%d", r.area(), bestArea);
bestIndex=j;
bestArea=r.area();
}
}
}
if (bestIndex >= 0) {
LOGD("DetectionBasedTracker::updateTrackedObjects: The best correspondence for i=%d is j=%d", i, bestIndex);
correspondence[bestIndex]=i;

for(int j=0; j < N2; j++) {
if (correspondence[j] >= 0)
continue;

Rect r=detectedObjects[j] & detectedObjects[bestIndex];
if ( (r.width > 0) && (r.height > 0) ) {
LOGD("DetectionBasedTracker::updateTrackedObjects: Found intersection between "
"rectangles j=%d and bestIndex=%d, rectangle j=%d is marked as intersected", j, bestIndex, j);
correspondence[j]=INTERSECTED_RECTANGLE;
}
}
} else {
LOGD("DetectionBasedTracker::updateTrackedObjects: There is no correspondence for i=%d ", i);
curObject.numFramesNotDetected++;
}
}

LOGD("DetectionBasedTracker::updateTrackedObjects: start second cycle");
for(int j=0; j < N2; j++) {
LOGD("DetectionBasedTracker::updateTrackedObjects: j=%d", j);
int i=correspondence[j];
if (i >= 0) {//add position
LOGD("DetectionBasedTracker::updateTrackedObjects: add position");
trackedObjects[i].lastPositions.push_back(detectedObjects[j]);
while ((int)trackedObjects[i].lastPositions.size() > (int) innerParameters.numLastPositionsToTrack) {
trackedObjects[i].lastPositions.erase(trackedObjects[i].lastPositions.begin());
}
trackedObjects[i].numFramesNotDetected=0;
} else if (i==NEW_RECTANGLE){ //new object
LOGD("DetectionBasedTracker::updateTrackedObjects: new object");
trackedObjects.push_back(detectedObjects[j]);
} else {
LOGD("DetectionBasedTracker::updateTrackedObjects: was auxiliary intersection");
}
}

std::vector<TrackedObject>::iterator it=trackedObjects.begin();
while( it != trackedObjects.end() ) {
if ( (it->numFramesNotDetected > parameters.maxTrackLifetime)
||
(
(it->numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow)
&&
(it->numFramesNotDetected > innerParameters.numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown)
)
)
{
int numpos=it->lastPositions.size();
CV_Assert(numpos > 0);
Rect r = it->lastPositions[numpos-1];
(void)(r);
LOGD("DetectionBasedTracker::updateTrackedObjects: deleted object {%d, %d, %d x %d}",
r.x, r.y, r.width, r.height);
it=trackedObjects.erase(it);
} else {
it++;
}
}
}

int cv::DetectionBasedTracker::addObject(const Rect& location)
{
LOGD("DetectionBasedTracker::addObject: new object {%d, %d %dx%d}",location.x, location.y, location.width, location.height);
trackedObjects.push_back(TrackedObject(location));
int newId = trackedObjects.back().id;
LOGD("DetectionBasedTracker::addObject: newId = %d", newId);
return newId;
}

Rect cv::DetectionBasedTracker::calcTrackedObjectPositionToShow(int i) const
{
ObjectStatus status;
return calcTrackedObjectPositionToShow(i, status);
}
Rect cv::DetectionBasedTracker::calcTrackedObjectPositionToShow(int i, ObjectStatus& status) const
{
if ( (i < 0) || (i >= (int)trackedObjects.size()) ) {
LOGE("DetectionBasedTracker::calcTrackedObjectPositionToShow: ERROR: wrong i=%d", i);
status = WRONG_OBJECT;
return Rect();
}
if (trackedObjects[i].numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow){
LOGI("DetectionBasedTracker::calcTrackedObjectPositionToShow: trackedObjects[%d].numDetectedFrames=%d <= numStepsToWaitBeforeFirstShow=%d --- return empty Rect()",
i, trackedObjects[i].numDetectedFrames, innerParameters.numStepsToWaitBeforeFirstShow);
status = DETECTED_NOT_SHOWN_YET;
return Rect();
}
if (trackedObjects[i].numFramesNotDetected > innerParameters.numStepsToShowWithoutDetecting) {
status = DETECTED_TEMPORARY_LOST;
return Rect();
}

const TrackedObject::PositionsVector& lastPositions=trackedObjects[i].lastPositions;

int N=lastPositions.size();
if (N<=0) {
LOGE("DetectionBasedTracker::calcTrackedObjectPositionToShow: ERROR: no positions for i=%d", i);
status = WRONG_OBJECT;
return Rect();
}

int Nsize=std::min(N, (int)weightsSizesSmoothing.size());
int Ncenter= std::min(N, (int)weightsPositionsSmoothing.size());

Point2f center;
double w=0, h=0;
if (Nsize > 0) {
double sum=0;
for(int j=0; j < Nsize; j++) {
int k=N-j-1;
w += lastPositions[k].width * weightsSizesSmoothing[j];
h += lastPositions[k].height * weightsSizesSmoothing[j];
sum+=weightsSizesSmoothing[j];
}
w /= sum;
h /= sum;
} else {
w=lastPositions[N-1].width;
h=lastPositions[N-1].height;
}

if (Ncenter > 0) {
double sum=0;
for(int j=0; j < Ncenter; j++) {
int k=N-j-1;
Point tl(lastPositions[k].tl());
Point br(lastPositions[k].br());
Point2f c1;
c1=tl;
c1=c1* 0.5f;
Point2f c2;
c2=br;
c2=c2*0.5f;
c1=c1+c2;

center=center+ (c1 * weightsPositionsSmoothing[j]);
sum+=weightsPositionsSmoothing[j];
}
center *= (float)(1 / sum);
} else {
int k=N-1;
Point tl(lastPositions[k].tl());
Point br(lastPositions[k].br());
Point2f c1;
c1=tl;
c1=c1* 0.5f;
Point2f c2;
c2=br;
c2=c2*0.5f;

center=c1+c2;
}
Point2f tl=center-(Point2f(w,h)*0.5);
Rect res(cvRound(tl.x), cvRound(tl.y), cvRound(w), cvRound(h));
LOGD("DetectionBasedTracker::calcTrackedObjectPositionToShow: Result for i=%d: {%d, %d, %d x %d}", i, res.x, res.y, res.width, res.height);

status = DETECTED;
return res;
}

void cv::DetectionBasedTracker::detectInRegion(const Mat& img, const Rect& r, std::vector<Rect>& detectedObjectsInRegions)
{
Rect r0(Point(), img.size());
Rect r1 = scale_rect(r, innerParameters.coeffTrackingWindowSize);
r1 = r1 & r0;

if ( (r1.width <=0) || (r1.height <= 0) ) {
LOGD("DetectionBasedTracker::detectInRegion: Empty intersection");
return;
}

int d = cvRound(std::min(r.width, r.height) * innerParameters.coeffObjectSizeToTrack);

std::vector<Rect> tmpobjects;

Mat img1(img, r1);//subimage for rectangle -- without data copying
LOGD("DetectionBasedTracker::detectInRegion: img1.size()=%d x %d, d=%d",
img1.size().width, img1.size().height, d);

cascadeForTracking->setMinObjectSize(Size(d, d));
cascadeForTracking->detect(img1, tmpobjects);
/*
detectMultiScale( img1, tmpobjects,
parameters.scaleFactor, parameters.minNeighbors, 0
|CV_HAAR_FIND_BIGGEST_OBJECT
|CV_HAAR_SCALE_IMAGE
,
Size(d,d),
max_objectSize
);*/

for(size_t i=0; i < tmpobjects.size(); i++) {
Rect curres(tmpobjects[i].tl() + r1.tl(), tmpobjects[i].size());
detectedObjectsInRegions.push_back(curres);
}
}

bool cv::DetectionBasedTracker::setParameters(const Parameters& params)
{
if ( params.maxTrackLifetime < 0 )
{
LOGE("DetectionBasedTracker::setParameters: ERROR: wrong parameters value");
return false;
}

if (separateDetectionWork) {
separateDetectionWork->lock();
}
parameters=params;
if (separateDetectionWork) {
separateDetectionWork->unlock();
}
return true;
}

const cv::DetectionBasedTracker::Parameters& DetectionBasedTracker::getParameters() const
{
return parameters;
}

#endif
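For reference while this class waits for a new home: the tracker is driven entirely through the IDetector interface used above (detect() plus the object-size setters). A hedged sketch of a typical driver; the protected adapter members (minObjSize, maxObjSize, scaleFactor, minNeighbours) and the cascade path are assumptions for illustration, not something this patch defines:

#include <opencv2/opencv.hpp>

// Illustrative adapter: mirrors the detect()/setMinObjectSize() calls made above.
class CascadeAdapter : public cv::DetectionBasedTracker::IDetector
{
public:
    explicit CascadeAdapter(const std::string& file) { cascade.load(file); }

    void detect(const cv::Mat& image, std::vector<cv::Rect>& objects)
    {
        cascade.detectMultiScale(image, objects, scaleFactor, minNeighbours,
                                 0, minObjSize, maxObjSize);
    }

private:
    cv::CascadeClassifier cascade;
};

int main()
{
    // Placeholder cascade file; any detector satisfying IDetector works.
    cv::Ptr<cv::DetectionBasedTracker::IDetector> mainDet(new CascadeAdapter("lbpcascade_frontalface.xml"));
    cv::Ptr<cv::DetectionBasedTracker::IDetector> trackDet(new CascadeAdapter("lbpcascade_frontalface.xml"));

    cv::DetectionBasedTracker::Parameters params;
    cv::DetectionBasedTracker tracker(mainDet, trackDet, params);

    tracker.run(); // spawns the SeparateDetectionWork thread

    cv::VideoCapture cap(0);
    cv::Mat frame, gray;
    std::vector<cv::Rect> faces;
    while (cap.read(frame))
    {
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        tracker.process(gray);     // cheap per-frame update on the caller's thread
        tracker.getObjects(faces); // smoothed positions from calcTrackedObjectPositionToShow()
    }
    tracker.stop();
    return 0;
}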
@ -1,901 +0,0 @@
/*
* Copyright (c) 2011,2012. Philipp Wagner <bytefish[at]gmx[dot]de>.
* Released to public domain under terms of the BSD Simplified license.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the organization nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* See <http://www.opensource.org/licenses/bsd-license>
*/
#include "precomp.hpp"
#include <set>
#include <limits>

namespace cv
{

// Reads a sequence from a FileNode::SEQ with type _Tp into a result vector.
template<typename _Tp>
inline void readFileNodeList(const FileNode& fn, std::vector<_Tp>& result) {
if (fn.type() == FileNode::SEQ) {
for (FileNodeIterator it = fn.begin(); it != fn.end();) {
_Tp item;
it >> item; // FileNodeIterator's operator>> also advances the iterator
result.push_back(item);
}
}
}

// Writes a list of given items to a cv::FileStorage.
template<typename _Tp>
inline void writeFileNodeList(FileStorage& fs, const String& name,
const std::vector<_Tp>& items) {
// typedefs
typedef typename std::vector<_Tp>::const_iterator constVecIterator;
// write the elements in item to fs
fs << name << "[";
for (constVecIterator it = items.begin(); it != items.end(); ++it) {
fs << *it;
}
fs << "]";
}

static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
// make sure the input data is a vector of matrices or vector of vector
if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
CV_Error(Error::StsBadArg, error_message);
}
// number of samples
size_t n = src.total();
// return empty matrix if no matrices given
if(n == 0)
return Mat();
// dimensionality of (reshaped) samples
size_t d = src.getMat(0).total();
// create data matrix
Mat data((int)n, (int)d, rtype);
// now copy data
for(unsigned int i = 0; i < n; i++) {
// make sure data can be reshaped, throw exception if not!
if(src.getMat(i).total() != d) {
String error_message = format("Wrong number of elements in matrix #%d! Expected %d, but was %d.", i, (int)d, (int)src.getMat(i).total());
CV_Error(Error::StsBadArg, error_message);
}
// get a hold of the current row
Mat xi = data.row(i);
// make reshape happy by cloning for non-continuous matrices
if(src.getMat(i).isContinuous()) {
src.getMat(i).reshape(1, 1).convertTo(xi, rtype, alpha, beta);
} else {
src.getMat(i).clone().reshape(1, 1).convertTo(xi, rtype, alpha, beta);
}
}
return data;
}
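asRowMatrix() is the data layout the rest of this file is built on: every training image is flattened into one row of a single matrix. A toy, standalone sketch of the resulting shape (the sizes are invented for illustration):

// Three 2x2 images become one 3x4 CV_64FC1 matrix, one flattened image per row.
#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    std::vector<cv::Mat> images;
    for (int i = 0; i < 3; i++)
        images.push_back(cv::Mat::ones(2, 2, CV_8UC1) * (i + 1));

    cv::Mat data((int)images.size(), 4, CV_64FC1);
    for (size_t i = 0; i < images.size(); i++)
        images[i].reshape(1, 1).convertTo(data.row((int)i), CV_64FC1);

    std::cout << data << std::endl; // [1, 1, 1, 1; 2, 2, 2, 2; 3, 3, 3, 3]
    return 0;
}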


// Removes duplicate elements in a given vector.
template<typename _Tp>
inline std::vector<_Tp> remove_dups(const std::vector<_Tp>& src) {
typedef typename std::set<_Tp>::const_iterator constSetIterator;
typedef typename std::vector<_Tp>::const_iterator constVecIterator;
std::set<_Tp> set_elems;
for (constVecIterator it = src.begin(); it != src.end(); ++it)
set_elems.insert(*it);
std::vector<_Tp> elems;
for (constSetIterator it = set_elems.begin(); it != set_elems.end(); ++it)
elems.push_back(*it);
return elems;
}


// Turk, M., and Pentland, A. "Eigenfaces for recognition." Journal of
// Cognitive Neuroscience 3 (1991), 71–86.
class Eigenfaces : public FaceRecognizer
{
private:
int _num_components;
double _threshold;
std::vector<Mat> _projections;
Mat _labels;
Mat _eigenvectors;
Mat _eigenvalues;
Mat _mean;

public:
using FaceRecognizer::save;
using FaceRecognizer::load;

// Initializes an empty Eigenfaces model.
Eigenfaces(int num_components = 0, double threshold = DBL_MAX) :
_num_components(num_components),
_threshold(threshold) {}

// Initializes and computes an Eigenfaces model with images in src and
// corresponding labels in labels. num_components will be kept for
// classification.
Eigenfaces(InputArrayOfArrays src, InputArray labels,
int num_components = 0, double threshold = DBL_MAX) :
_num_components(num_components),
_threshold(threshold) {
train(src, labels);
}

// Computes an Eigenfaces model with images in src and corresponding labels
// in labels.
void train(InputArrayOfArrays src, InputArray labels);

// Predicts the label of a query image in src.
int predict(InputArray src) const;

// Predicts the label and confidence for a given sample.
void predict(InputArray _src, int &label, double &dist) const;

// See FaceRecognizer::load.
void load(const FileStorage& fs);

// See FaceRecognizer::save.
void save(FileStorage& fs) const;

AlgorithmInfo* info() const;
};

// Belhumeur, P. N., Hespanha, J., and Kriegman, D. "Eigenfaces vs. Fisher-
// faces: Recognition using class specific linear projection." IEEE
// Transactions on Pattern Analysis and Machine Intelligence 19, 7 (1997),
// 711–720.
class Fisherfaces: public FaceRecognizer
{
private:
int _num_components;
double _threshold;
Mat _eigenvectors;
Mat _eigenvalues;
Mat _mean;
std::vector<Mat> _projections;
Mat _labels;

public:
using FaceRecognizer::save;
using FaceRecognizer::load;

// Initializes an empty Fisherfaces model.
Fisherfaces(int num_components = 0, double threshold = DBL_MAX) :
_num_components(num_components),
_threshold(threshold) {}

// Initializes and computes a Fisherfaces model with images in src and
// corresponding labels in labels. num_components will be kept for
// classification.
Fisherfaces(InputArrayOfArrays src, InputArray labels,
int num_components = 0, double threshold = DBL_MAX) :
_num_components(num_components),
_threshold(threshold) {
train(src, labels);
}

~Fisherfaces() {}

// Computes a Fisherfaces model with images in src and corresponding labels
// in labels.
void train(InputArrayOfArrays src, InputArray labels);

// Predicts the label of a query image in src.
int predict(InputArray src) const;

// Predicts the label and confidence for a given sample.
void predict(InputArray _src, int &label, double &dist) const;

// See FaceRecognizer::load.
void load(const FileStorage& fs);

// See FaceRecognizer::save.
void save(FileStorage& fs) const;

AlgorithmInfo* info() const;
};

// Face Recognition based on Local Binary Patterns.
//
// Ahonen T, Hadid A. and Pietikäinen M. "Face description with local binary
// patterns: Application to face recognition." IEEE Transactions on Pattern
// Analysis and Machine Intelligence, 28(12):2037-2041.
//
class LBPH : public FaceRecognizer
{
private:
int _grid_x;
int _grid_y;
int _radius;
int _neighbors;
double _threshold;

std::vector<Mat> _histograms;
Mat _labels;

// Computes a LBPH model with images in src and
// corresponding labels in labels, possibly preserving
// old model data.
void train(InputArrayOfArrays src, InputArray labels, bool preserveData);


public:
using FaceRecognizer::save;
using FaceRecognizer::load;

// Initializes this LBPH Model. The current implementation is rather fixed
// as it uses the Extended Local Binary Patterns per default.
//
// radius, neighbors are used in the local binary patterns creation.
// grid_x, grid_y control the grid size of the spatial histograms.
LBPH(int radius_=1, int neighbors_=8,
int gridx=8, int gridy=8,
double threshold = DBL_MAX) :
_grid_x(gridx),
_grid_y(gridy),
_radius(radius_),
_neighbors(neighbors_),
_threshold(threshold) {}

// Initializes and computes this LBPH Model. The current implementation is
// rather fixed as it uses the Extended Local Binary Patterns per default.
//
// (radius=1), (neighbors=8) are used in the local binary patterns creation.
// (grid_x=8), (grid_y=8) control the grid size of the spatial histograms.
LBPH(InputArrayOfArrays src,
InputArray labels,
int radius_=1, int neighbors_=8,
int gridx=8, int gridy=8,
double threshold = DBL_MAX) :
_grid_x(gridx),
_grid_y(gridy),
_radius(radius_),
_neighbors(neighbors_),
_threshold(threshold) {
train(src, labels);
}

~LBPH() { }

// Computes a LBPH model with images in src and
// corresponding labels in labels.
void train(InputArrayOfArrays src, InputArray labels);

// Updates this LBPH model with images in src and
// corresponding labels in labels.
void update(InputArrayOfArrays src, InputArray labels);

// Predicts the label of a query image in src.
int predict(InputArray src) const;

// Predicts the label and confidence for a given sample.
void predict(InputArray _src, int &label, double &dist) const;

// See FaceRecognizer::load.
void load(const FileStorage& fs);

// See FaceRecognizer::save.
void save(FileStorage& fs) const;

// Getter functions.
int neighbors() const { return _neighbors; }
int radius() const { return _radius; }
int grid_x() const { return _grid_x; }
int grid_y() const { return _grid_y; }

AlgorithmInfo* info() const;
};


//------------------------------------------------------------------------------
// FaceRecognizer
//------------------------------------------------------------------------------
void FaceRecognizer::update(InputArrayOfArrays src, InputArray labels ) {
if( dynamic_cast<LBPH*>(this) != 0 )
{
dynamic_cast<LBPH*>(this)->update( src, labels );
return;
}

String error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str());
CV_Error(Error::StsNotImplemented, error_msg);
}

void FaceRecognizer::save(const String& filename) const {
FileStorage fs(filename, FileStorage::WRITE);
if (!fs.isOpened())
CV_Error(Error::StsError, "File can't be opened for writing!");
this->save(fs);
fs.release();
}

void FaceRecognizer::load(const String& filename) {
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
CV_Error(Error::StsError, "File can't be opened for writing!");
|
||||
this->load(fs);
|
||||
fs.release();
|
||||
}
|
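As a reference while this code migrates to opencv_contrib, here is a minimal sketch of the save/load round trip implemented above. The include path and the file name "eigenfaces.yml" are assumptions for illustration, and the recognizer is assumed to be already trained.

// Hedged sketch: persist a trained recognizer and restore it.
// "eigenfaces.yml" is a placeholder path, not a fixed convention.
#include "opencv2/contrib.hpp"
using namespace cv;

static Ptr<FaceRecognizer> roundTrip(const Ptr<FaceRecognizer>& trained)
{
    trained->save("eigenfaces.yml");                  // goes through save(FileStorage&)
    Ptr<FaceRecognizer> restored = createEigenFaceRecognizer(); // defaults assumed from the header
    restored->load("eigenfaces.yml");                 // goes through load(FileStorage&)
    return restored;
}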

//------------------------------------------------------------------------------
// Eigenfaces
//------------------------------------------------------------------------------
void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
    if(_src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(Error::StsBadArg, error_message);
    } else if(_local_labels.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _local_labels.type());
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure the data has the correct size
    if(_src.total() > 1) {
        for(int i = 1; i < static_cast<int>(_src.total()); i++) {
            if(_src.getMat(i-1).total() != _src.getMat(i).total()) {
                String error_message = format("In the Eigenfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", static_cast<int>(_src.getMat(i-1).total()), static_cast<int>(_src.getMat(i).total()));
                CV_Error(Error::StsUnsupportedFormat, error_message);
            }
        }
    }
    // get labels
    Mat labels = _local_labels.getMat();
    // observations in row
    Mat data = asRowMatrix(_src, CV_64FC1);

    // number of samples
    int n = data.rows;
    // assert there are as many samples as labels
    if(static_cast<int>(labels.total()) != n) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", n, static_cast<int>(labels.total()));
        CV_Error(Error::StsBadArg, error_message);
    }
    // clear existing model data
    _labels.release();
    _projections.clear();
    // clip number of components to be valid
    if((_num_components <= 0) || (_num_components > n))
        _num_components = n;

    // perform the PCA
    PCA pca(data, Mat(), PCA::DATA_AS_ROW, _num_components);
    // copy the PCA results
    _mean = pca.mean.reshape(1,1); // store the mean vector
    _eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row
    transpose(pca.eigenvectors, _eigenvectors); // eigenvectors by column
    // store labels for prediction
    _labels = labels.clone();
    // save projections
    for(int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
        Mat p = subspaceProject(_eigenvectors, _mean, data.row(sampleIdx));
        _projections.push_back(p);
    }
}

void Eigenfaces::predict(InputArray _src, int &minClass, double &minDist) const {
    // get data
    Mat src = _src.getMat();
    // make sure the user is passing correct data
    if(_projections.empty()) {
        // throw an error if no data (or simply return -1?)
        String error_message = "This Eigenfaces model is not computed yet. Did you call Eigenfaces::train?";
        CV_Error(Error::StsError, error_message);
    } else if(_eigenvectors.rows != static_cast<int>(src.total())) {
        // check data alignment just for clearer exception messages
        String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, static_cast<int>(src.total()));
        CV_Error(Error::StsBadArg, error_message);
    }
    // project into PCA subspace
    Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
    minDist = DBL_MAX;
    minClass = -1;
    for(size_t sampleIdx = 0; sampleIdx < _projections.size(); sampleIdx++) {
        double dist = norm(_projections[sampleIdx], q, NORM_L2);
        if((dist < minDist) && (dist < _threshold)) {
            minDist = dist;
            minClass = _labels.at<int>((int)sampleIdx);
        }
    }
}

int Eigenfaces::predict(InputArray _src) const {
    int label;
    double dummy;
    predict(_src, label, dummy);
    return label;
}

void Eigenfaces::load(const FileStorage& fs) {
    // read matrices
    fs["num_components"] >> _num_components;
    fs["mean"] >> _mean;
    fs["eigenvalues"] >> _eigenvalues;
    fs["eigenvectors"] >> _eigenvectors;
    // read sequences
    readFileNodeList(fs["projections"], _projections);
    fs["labels"] >> _labels;
}

void Eigenfaces::save(FileStorage& fs) const {
    // write matrices
    fs << "num_components" << _num_components;
    fs << "mean" << _mean;
    fs << "eigenvalues" << _eigenvalues;
    fs << "eigenvectors" << _eigenvectors;
    // write sequences
    writeFileNodeList(fs, "projections", _projections);
    fs << "labels" << _labels;
}

//------------------------------------------------------------------------------
// Fisherfaces
//------------------------------------------------------------------------------
void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
    if(src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(Error::StsBadArg, error_message);
    } else if(_lbls.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure the data has the correct size
    if(src.total() > 1) {
        for(int i = 1; i < static_cast<int>(src.total()); i++) {
            if(src.getMat(i-1).total() != src.getMat(i).total()) {
                String error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", static_cast<int>(src.getMat(i-1).total()), static_cast<int>(src.getMat(i).total()));
                CV_Error(Error::StsUnsupportedFormat, error_message);
            }
        }
    }
    // get data
    Mat labels = _lbls.getMat();
    Mat data = asRowMatrix(src, CV_64FC1);
    // number of samples
    int N = data.rows;
    // make sure labels are passed in the correct shape
    if(labels.total() != (size_t) N) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", N, static_cast<int>(labels.total()));
        CV_Error(Error::StsBadArg, error_message);
    } else if(labels.rows != 1 && labels.cols != 1) {
        String error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%d, cols=%d.", labels.rows, labels.cols);
        CV_Error(Error::StsBadArg, error_message);
    }
    // clear existing model data
    _labels.release();
    _projections.clear();
    // safely copy from cv::Mat to std::vector
    std::vector<int> ll;
    for(unsigned int i = 0; i < labels.total(); i++) {
        ll.push_back(labels.at<int>(i));
    }
    // get the number of unique classes
    int C = (int) remove_dups(ll).size();
    // clip number of components to be a valid number
    if((_num_components <= 0) || (_num_components > (C-1)))
        _num_components = (C-1);
    // perform a PCA and keep (N-C) components
    PCA pca(data, Mat(), PCA::DATA_AS_ROW, (N-C));
    // project the data and perform an LDA on it
    LDA lda(pca.project(data), labels, _num_components);
    // store the total mean vector
    _mean = pca.mean.reshape(1,1);
    // store labels
    _labels = labels.clone();
    // store the eigenvalues of the discriminants
    lda.eigenvalues().convertTo(_eigenvalues, CV_64FC1);
    // Now calculate the projection matrix as pca.eigenvectors * lda.eigenvectors.
    // Note: OpenCV stores the eigenvectors by row, so we need to transpose it!
    gemm(pca.eigenvectors, lda.eigenvectors(), 1.0, Mat(), 0.0, _eigenvectors, GEMM_1_T);
    // store the projections of the original data
    for(int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
        Mat p = subspaceProject(_eigenvectors, _mean, data.row(sampleIdx));
        _projections.push_back(p);
    }
}
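The dimension bookkeeping in this pipeline is easy to misread, so here is a small numeric sketch of it, using only quantities that appear in the code above: PCA keeps N-C components so the within-class scatter stays non-singular, and LDA then yields at most C-1 discriminants.

// Numeric sketch of the Fisherfaces dimension reduction above.
#include <cstdio>

int main()
{
    int N = 40, C = 4;                 // e.g. 10 images each of 4 people
    int pca_components = N - C;        // 36 components kept by the PCA step
    int lda_components = C - 1;        // 3 = final Fisherfaces dimensionality
    std::printf("PCA keeps %d, LDA keeps %d\n", pca_components, lda_components);
    return 0;
}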

void Fisherfaces::predict(InputArray _src, int &minClass, double &minDist) const {
    Mat src = _src.getMat();
    // check data alignment just for clearer exception messages
    if(_projections.empty()) {
        // throw an error if no data (or simply return -1?)
        String error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?";
        CV_Error(Error::StsBadArg, error_message);
    } else if(src.total() != (size_t) _eigenvectors.rows) {
        String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, static_cast<int>(src.total()));
        CV_Error(Error::StsBadArg, error_message);
    }
    // project into LDA subspace
    Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
    // find 1-nearest neighbor
    minDist = DBL_MAX;
    minClass = -1;
    for(size_t sampleIdx = 0; sampleIdx < _projections.size(); sampleIdx++) {
        double dist = norm(_projections[sampleIdx], q, NORM_L2);
        if((dist < minDist) && (dist < _threshold)) {
            minDist = dist;
            minClass = _labels.at<int>((int)sampleIdx);
        }
    }
}

int Fisherfaces::predict(InputArray _src) const {
    int label;
    double dummy;
    predict(_src, label, dummy);
    return label;
}

// See FaceRecognizer::load.
void Fisherfaces::load(const FileStorage& fs) {
    // read matrices
    fs["num_components"] >> _num_components;
    fs["mean"] >> _mean;
    fs["eigenvalues"] >> _eigenvalues;
    fs["eigenvectors"] >> _eigenvectors;
    // read sequences
    readFileNodeList(fs["projections"], _projections);
    fs["labels"] >> _labels;
}

// See FaceRecognizer::save.
void Fisherfaces::save(FileStorage& fs) const {
    // write matrices
    fs << "num_components" << _num_components;
    fs << "mean" << _mean;
    fs << "eigenvalues" << _eigenvalues;
    fs << "eigenvectors" << _eigenvectors;
    // write sequences
    writeFileNodeList(fs, "projections", _projections);
    fs << "labels" << _labels;
}

//------------------------------------------------------------------------------
// LBPH
//------------------------------------------------------------------------------

template <typename _Tp> static
void olbp_(InputArray _src, OutputArray _dst) {
    // get matrices
    Mat src = _src.getMat();
    // allocate memory for result
    _dst.create(src.rows-2, src.cols-2, CV_8UC1);
    Mat dst = _dst.getMat();
    // zero the result matrix
    dst.setTo(0);
    // calculate patterns
    for(int i=1; i<src.rows-1; i++) {
        for(int j=1; j<src.cols-1; j++) {
            _Tp center = src.at<_Tp>(i,j);
            unsigned char code = 0;
            code |= (src.at<_Tp>(i-1,j-1) >= center) << 7;
            code |= (src.at<_Tp>(i-1,j) >= center) << 6;
            code |= (src.at<_Tp>(i-1,j+1) >= center) << 5;
            code |= (src.at<_Tp>(i,j+1) >= center) << 4;
            code |= (src.at<_Tp>(i+1,j+1) >= center) << 3;
            code |= (src.at<_Tp>(i+1,j) >= center) << 2;
            code |= (src.at<_Tp>(i+1,j-1) >= center) << 1;
            code |= (src.at<_Tp>(i,j-1) >= center) << 0;
            dst.at<unsigned char>(i-1,j-1) = code;
        }
    }
}
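A worked example of the 3x3 code computed by olbp_ above; the patch values are arbitrary illustrations. Neighbors greater than or equal to the center contribute a 1, packed clockwise from the top-left as in the loop body.

// Standalone sketch: one LBP code for a single 3x3 patch.
#include <cstdio>

int main()
{
    unsigned char p[3][3] = { {  90, 120,  60 },
                              {  70, 100, 110 },
                              { 130,  95, 100 } };
    unsigned char c = p[1][1], code = 0;
    code |= (p[0][0] >= c) << 7;  //  90 >= 100 -> 0
    code |= (p[0][1] >= c) << 6;  // 120 >= 100 -> 1
    code |= (p[0][2] >= c) << 5;  //  60 >= 100 -> 0
    code |= (p[1][2] >= c) << 4;  // 110 >= 100 -> 1
    code |= (p[2][2] >= c) << 3;  // 100 >= 100 -> 1
    code |= (p[2][1] >= c) << 2;  //  95 >= 100 -> 0
    code |= (p[2][0] >= c) << 1;  // 130 >= 100 -> 1
    code |= (p[1][0] >= c) << 0;  //  70 >= 100 -> 0
    std::printf("LBP code = 0x%02x\n", code); // 0x5a = 0b01011010
    return 0;
}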

//------------------------------------------------------------------------------
// cv::elbp
//------------------------------------------------------------------------------
template <typename _Tp> static
inline void elbp_(InputArray _src, OutputArray _dst, int radius, int neighbors) {
    // get matrices
    Mat src = _src.getMat();
    // allocate memory for result
    _dst.create(src.rows-2*radius, src.cols-2*radius, CV_32SC1);
    Mat dst = _dst.getMat();
    // zero
    dst.setTo(0);
    for(int n=0; n<neighbors; n++) {
        // sample points
        float x = static_cast<float>(radius * cos(2.0*CV_PI*n/static_cast<float>(neighbors)));
        float y = static_cast<float>(-radius * sin(2.0*CV_PI*n/static_cast<float>(neighbors)));
        // relative indices
        int fx = static_cast<int>(floor(x));
        int fy = static_cast<int>(floor(y));
        int cx = static_cast<int>(ceil(x));
        int cy = static_cast<int>(ceil(y));
        // fractional part
        float ty = y - fy;
        float tx = x - fx;
        // set interpolation weights
        float w1 = (1 - tx) * (1 - ty);
        float w2 =      tx  * (1 - ty);
        float w3 = (1 - tx) *      ty;
        float w4 =      tx  *      ty;
        // iterate through the data
        for(int i=radius; i < src.rows-radius; i++) {
            for(int j=radius; j < src.cols-radius; j++) {
                // calculate interpolated value
                float t = static_cast<float>(w1*src.at<_Tp>(i+fy,j+fx) + w2*src.at<_Tp>(i+fy,j+cx) + w3*src.at<_Tp>(i+cy,j+fx) + w4*src.at<_Tp>(i+cy,j+cx));
                // floating point precision, so check against a machine-dependent epsilon
                dst.at<int>(i-radius,j-radius) += ((t > src.at<_Tp>(i,j)) || (std::abs(t-src.at<_Tp>(i,j)) < std::numeric_limits<float>::epsilon())) << n;
            }
        }
    }
}
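To make the sampling geometry above concrete, here is a small standalone sketch for radius=1 and neighbors=8 showing that the four bilinear weights of each off-grid sample point always sum to 1, so the interpolated value t stays in the range of the neighborhood.

// Standalone sketch of the elbp_ sampling weights (PI spelled out so the
// snippet does not depend on OpenCV headers).
#include <cmath>
#include <cstdio>

int main()
{
    const double PI = 3.14159265358979323846;
    const int radius = 1, neighbors = 8;
    for (int n = 0; n < neighbors; n++) {
        double x =  radius * std::cos(2.0 * PI * n / neighbors);
        double y = -radius * std::sin(2.0 * PI * n / neighbors);
        int fx = (int)std::floor(x), fy = (int)std::floor(y);
        double tx = x - fx, ty = y - fy;     // fractional parts, as in elbp_
        double w1 = (1 - tx) * (1 - ty), w2 = tx * (1 - ty);
        double w3 = (1 - tx) * ty,       w4 = tx * ty;
        std::printf("n=%d  w1+w2+w3+w4 = %.3f\n", n, w1 + w2 + w3 + w4); // 1.000
    }
    return 0;
}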

static void elbp(InputArray src, OutputArray dst, int radius, int neighbors)
{
    int type = src.type();
    switch (type) {
    case CV_8SC1:  elbp_<char>(src, dst, radius, neighbors); break;
    case CV_8UC1:  elbp_<unsigned char>(src, dst, radius, neighbors); break;
    case CV_16SC1: elbp_<short>(src, dst, radius, neighbors); break;
    case CV_16UC1: elbp_<unsigned short>(src, dst, radius, neighbors); break;
    case CV_32SC1: elbp_<int>(src, dst, radius, neighbors); break;
    case CV_32FC1: elbp_<float>(src, dst, radius, neighbors); break;
    case CV_64FC1: elbp_<double>(src, dst, radius, neighbors); break;
    default:
        String error_msg = format("Using Extended Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type);
        CV_Error(Error::StsNotImplemented, error_msg);
        break;
    }
}

static Mat
histc_(const Mat& src, int minVal=0, int maxVal=255, bool normed=false)
{
    Mat result;
    // Establish the number of bins.
    int histSize = maxVal-minVal+1;
    // Set the ranges.
    float range[] = { static_cast<float>(minVal), static_cast<float>(maxVal+1) };
    const float* histRange = { range };
    // calc histogram
    calcHist(&src, 1, 0, Mat(), result, 1, &histSize, &histRange, true, false);
    // normalize
    if(normed) {
        result /= (int)src.total();
    }
    return result.reshape(1,1);
}

static Mat histc(InputArray _src, int minVal, int maxVal, bool normed)
{
    Mat src = _src.getMat();
    switch (src.type()) {
    case CV_8SC1:
        return histc_(Mat_<float>(src), minVal, maxVal, normed);
    case CV_8UC1:
        return histc_(src, minVal, maxVal, normed);
    case CV_16SC1:
        return histc_(Mat_<float>(src), minVal, maxVal, normed);
    case CV_16UC1:
        return histc_(src, minVal, maxVal, normed);
    case CV_32SC1:
        return histc_(Mat_<float>(src), minVal, maxVal, normed);
    case CV_32FC1:
        return histc_(src, minVal, maxVal, normed);
    default:
        CV_Error(Error::StsUnmatchedFormats, "This type is not implemented yet.");
    }
    return Mat();
}


static Mat spatial_histogram(InputArray _src, int numPatterns,
                             int grid_x, int grid_y, bool /*normed*/)
{
    Mat src = _src.getMat();
    // calculate LBP patch size
    int width = src.cols/grid_x;
    int height = src.rows/grid_y;
    // allocate memory for the spatial histogram
    Mat result = Mat::zeros(grid_x * grid_y, numPatterns, CV_32FC1);
    // return matrix with zeros if no data was given
    if(src.empty())
        return result.reshape(1,1);
    // initial result_row
    int resultRowIdx = 0;
    // iterate through grid
    for(int i = 0; i < grid_y; i++) {
        for(int j = 0; j < grid_x; j++) {
            Mat src_cell = Mat(src, Range(i*height,(i+1)*height), Range(j*width,(j+1)*width));
            Mat cell_hist = histc(src_cell, 0, (numPatterns-1), true);
            // copy to the result matrix
            Mat result_row = result.row(resultRowIdx);
            cell_hist.reshape(1,1).convertTo(result_row, CV_32FC1);
            // increase row count in result matrix
            resultRowIdx++;
        }
    }
    // return result as reshaped feature vector
    return result.reshape(1,1);
}
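A quick dimensionality check for the descriptor built here, using the default LBPH parameters; the numbers follow directly from the code above, nothing else is assumed.

// Descriptor length for the default radius=1, neighbors=8, 8x8 grid:
// grid_x * grid_y concatenated histograms of 2^neighbors bins each.
#include <cstdio>

int main()
{
    int neighbors = 8, grid_x = 8, grid_y = 8;
    int numPatterns = 1 << neighbors;            // 256 possible LBP codes
    int descriptor  = grid_x * grid_y * numPatterns;
    std::printf("feature vector length = %d\n", descriptor); // 16384
    return 0;
}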

//------------------------------------------------------------------------------
// wrapper to cv::elbp (extended local binary patterns)
//------------------------------------------------------------------------------

static Mat elbp(InputArray src, int radius, int neighbors) {
    Mat dst;
    elbp(src, dst, radius, neighbors);
    return dst;
}

void LBPH::load(const FileStorage& fs) {
    fs["radius"] >> _radius;
    fs["neighbors"] >> _neighbors;
    fs["grid_x"] >> _grid_x;
    fs["grid_y"] >> _grid_y;
    // read matrices
    readFileNodeList(fs["histograms"], _histograms);
    fs["labels"] >> _labels;
}

// See FaceRecognizer::save.
void LBPH::save(FileStorage& fs) const {
    fs << "radius" << _radius;
    fs << "neighbors" << _neighbors;
    fs << "grid_x" << _grid_x;
    fs << "grid_y" << _grid_y;
    // write matrices
    writeFileNodeList(fs, "histograms", _histograms);
    fs << "labels" << _labels;
}

void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels) {
    this->train(_in_src, _in_labels, false);
}

void LBPH::update(InputArrayOfArrays _in_src, InputArray _in_labels) {
    // got no data, just return
    if(_in_src.total() == 0)
        return;

    this->train(_in_src, _in_labels, true);
}

void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserveData) {
    if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) {
        String error_message = "The images are expected as _InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
        CV_Error(Error::StsBadArg, error_message);
    }
    if(_in_src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(Error::StsUnsupportedFormat, error_message);
    } else if(_in_labels.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
        CV_Error(Error::StsUnsupportedFormat, error_message);
    }
    // get the vector of matrices
    std::vector<Mat> src;
    _in_src.getMatVector(src);
    // get the label matrix
    Mat labels = _in_labels.getMat();
    // check if data is well-aligned
    if(labels.total() != src.size()) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", static_cast<int>(src.size()), static_cast<int>(labels.total()));
        CV_Error(Error::StsBadArg, error_message);
    }
    // if this model should be trained without preserving old data, delete the old model data
    if(!preserveData) {
        _labels.release();
        _histograms.clear();
    }
    // append labels to the _labels matrix
    for(size_t labelIdx = 0; labelIdx < labels.total(); labelIdx++) {
        _labels.push_back(labels.at<int>((int)labelIdx));
    }
    // store the spatial histograms of the original data
    for(size_t sampleIdx = 0; sampleIdx < src.size(); sampleIdx++) {
        // calculate lbp image
        Mat lbp_image = elbp(src[sampleIdx], _radius, _neighbors);
        // get spatial histogram from this lbp image
        Mat p = spatial_histogram(
                lbp_image, /* lbp_image */
                static_cast<int>(std::pow(2.0, static_cast<double>(_neighbors))), /* number of possible patterns */
                _grid_x, /* grid size x */
                _grid_y, /* grid size y */
                true);
        // add to templates
        _histograms.push_back(p);
    }
}

void LBPH::predict(InputArray _src, int &minClass, double &minDist) const {
    if(_histograms.empty()) {
        // throw error if no data (or simply return -1?)
        String error_message = "This LBPH model is not computed yet. Did you call the train method?";
        CV_Error(Error::StsBadArg, error_message);
    }
    Mat src = _src.getMat();
    // get the spatial histogram from input image
    Mat lbp_image = elbp(src, _radius, _neighbors);
    Mat query = spatial_histogram(
            lbp_image, /* lbp_image */
            static_cast<int>(std::pow(2.0, static_cast<double>(_neighbors))), /* number of possible patterns */
            _grid_x, /* grid size x */
            _grid_y, /* grid size y */
            true /* normed histograms */);
    // find 1-nearest neighbor
    minDist = DBL_MAX;
    minClass = -1;
    for(size_t sampleIdx = 0; sampleIdx < _histograms.size(); sampleIdx++) {
        double dist = compareHist(_histograms[sampleIdx], query, HISTCMP_CHISQR_ALT);
        if((dist < minDist) && (dist < _threshold)) {
            minDist = dist;
            minClass = _labels.at<int>((int) sampleIdx);
        }
    }
}
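The comparison above uses OpenCV's alternative chi-square statistic. A self-contained sketch of the same formula on plain arrays, following the documented HISTCMP_CHISQR_ALT definition (the bin values are illustrative):

// d(H1,H2) = 2 * sum_i (H1_i - H2_i)^2 / (H1_i + H2_i), skipping empty bins.
#include <cstdio>

static double chiSquareAlt(const double* a, const double* b, int n)
{
    double d = 0;
    for (int i = 0; i < n; i++) {
        double s = a[i] + b[i];
        if (s > 0)
            d += (a[i] - b[i]) * (a[i] - b[i]) / s;
    }
    return 2.0 * d;
}

int main()
{
    double h1[] = { 0.50, 0.50, 0.00 };
    double h2[] = { 0.25, 0.50, 0.25 };
    std::printf("d = %.4f\n", chiSquareAlt(h1, h2, 3)); // 0.6667
    return 0;
}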

int LBPH::predict(InputArray _src) const {
    int label;
    double dummy;
    predict(_src, label, dummy);
    return label;
}


Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components, double threshold)
{
    return makePtr<Eigenfaces>(num_components, threshold);
}

Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components, double threshold)
{
    return makePtr<Fisherfaces>(num_components, threshold);
}

Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius, int neighbors,
                                             int grid_x, int grid_y, double threshold)
{
    return makePtr<LBPH>(radius, neighbors, grid_x, grid_y, threshold);
}
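A minimal end-to-end usage sketch for these factories. The image files and labels are hypothetical placeholders, and the include paths assume this branch's layout; default parameter values are assumed to come from the public header.

// Hedged usage sketch: train an LBPH recognizer and predict one label.
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include <vector>
using namespace cv;

int main()
{
    std::vector<Mat> images;   // grayscale training images, equal sizes
    std::vector<int> labels;   // one integer label per image
    images.push_back(imread("subject0_a.png", IMREAD_GRAYSCALE)); labels.push_back(0);
    images.push_back(imread("subject0_b.png", IMREAD_GRAYSCALE)); labels.push_back(0);
    images.push_back(imread("subject1_a.png", IMREAD_GRAYSCALE)); labels.push_back(1);

    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer(); // radius=1, neighbors=8, 8x8 grid assumed
    model->train(images, labels);

    int predicted = model->predict(images[2]); // expect label 1
    return predicted == 1 ? 0 : 1;
}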

CV_INIT_ALGORITHM(Eigenfaces, "FaceRecognizer.Eigenfaces",
                  obj.info()->addParam(obj, "ncomponents", obj._num_components);
                  obj.info()->addParam(obj, "threshold", obj._threshold);
                  obj.info()->addParam(obj, "projections", obj._projections, true);
                  obj.info()->addParam(obj, "labels", obj._labels, true);
                  obj.info()->addParam(obj, "eigenvectors", obj._eigenvectors, true);
                  obj.info()->addParam(obj, "eigenvalues", obj._eigenvalues, true);
                  obj.info()->addParam(obj, "mean", obj._mean, true))

CV_INIT_ALGORITHM(Fisherfaces, "FaceRecognizer.Fisherfaces",
                  obj.info()->addParam(obj, "ncomponents", obj._num_components);
                  obj.info()->addParam(obj, "threshold", obj._threshold);
                  obj.info()->addParam(obj, "projections", obj._projections, true);
                  obj.info()->addParam(obj, "labels", obj._labels, true);
                  obj.info()->addParam(obj, "eigenvectors", obj._eigenvectors, true);
                  obj.info()->addParam(obj, "eigenvalues", obj._eigenvalues, true);
                  obj.info()->addParam(obj, "mean", obj._mean, true))

CV_INIT_ALGORITHM(LBPH, "FaceRecognizer.LBPH",
                  obj.info()->addParam(obj, "radius", obj._radius);
                  obj.info()->addParam(obj, "neighbors", obj._neighbors);
                  obj.info()->addParam(obj, "grid_x", obj._grid_x);
                  obj.info()->addParam(obj, "grid_y", obj._grid_y);
                  obj.info()->addParam(obj, "threshold", obj._threshold);
                  obj.info()->addParam(obj, "histograms", obj._histograms, true);
                  obj.info()->addParam(obj, "labels", obj._labels, true))

bool initModule_contrib()
{
    Ptr<Algorithm> efaces = createEigenfaces_ptr_hidden(), ffaces = createFisherfaces_ptr_hidden(), lbph = createLBPH_ptr_hidden();
    return efaces->info() != 0 && ffaces->info() != 0 && lbph->info() != 0;
}

}
@ -1,229 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <stdio.h>
#include <iostream>
#include "opencv2/calib3d.hpp"
#include "opencv2/contrib/hybridtracker.hpp"

#ifdef HAVE_OPENCV_NONFREE
#include "opencv2/nonfree/nonfree.hpp"

static bool makeUseOfNonfree = initModule_nonfree();
#endif

using namespace cv;

CvFeatureTracker::CvFeatureTracker(CvFeatureTrackerParams _params) :
    params(_params)
{
    switch (params.feature_type)
    {
    case CvFeatureTrackerParams::SIFT:
        dd = Algorithm::create<Feature2D>("Feature2D.SIFT");
        if( !dd )
            CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without SIFT support");
        dd->set("nOctaveLayers", 5);
        dd->set("contrastThreshold", 0.04);
        dd->set("edgeThreshold", 10.7);
        break;
    case CvFeatureTrackerParams::SURF:
        dd = Algorithm::create<Feature2D>("Feature2D.SURF");
        if( !dd )
            CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without SURF support");
        dd->set("hessianThreshold", 400);
        dd->set("nOctaves", 3);
        dd->set("nOctaveLayers", 4);
        break;
    default:
        CV_Error(CV_StsBadArg, "Unknown feature type");
        break;
    }

    matcher = makePtr<BFMatcher>(int(NORM_L2));
}

CvFeatureTracker::~CvFeatureTracker()
{
}

void CvFeatureTracker::newTrackingWindow(Mat image, Rect selection)
{
    image.copyTo(prev_image);
    cvtColor(prev_image, prev_image_bw, COLOR_BGR2GRAY);
    prev_trackwindow = selection;
    prev_center.x = selection.x;
    prev_center.y = selection.y;
    ittr = 0;
}

Rect CvFeatureTracker::updateTrackingWindow(Mat image)
{
    if(params.feature_type == CvFeatureTrackerParams::OPTICAL_FLOW)
        return updateTrackingWindowWithFlow(image);
    else
        return updateTrackingWindowWithSIFT(image);
}

Rect CvFeatureTracker::updateTrackingWindowWithSIFT(Mat image)
{
    ittr++;
    std::vector<KeyPoint> prev_keypoints, curr_keypoints;
    std::vector<Point2f> prev_keys, curr_keys;
    Mat prev_desc, curr_desc;

    Rect window = prev_trackwindow;
    Mat mask = Mat::zeros(image.size(), CV_8UC1);
    rectangle(mask, Point(window.x, window.y), Point(window.x + window.width,
            window.y + window.height), Scalar(255), CV_FILLED);

    dd->operator()(prev_image, mask, prev_keypoints, prev_desc);

    window.x -= params.window_size;
    window.y -= params.window_size;
    window.width += params.window_size;
    window.height += params.window_size;
    rectangle(mask, Point(window.x, window.y), Point(window.x + window.width,
            window.y + window.height), Scalar(255), CV_FILLED);

    dd->operator()(image, mask, curr_keypoints, curr_desc);

    if (prev_keypoints.size() > 4 && curr_keypoints.size() > 4)
    {
        //descriptor->compute(prev_image, prev_keypoints, prev_desc);
        //descriptor->compute(image, curr_keypoints, curr_desc);

        matcher->match(prev_desc, curr_desc, matches);

        for (int i = 0; i < (int)matches.size(); i++)
        {
            prev_keys.push_back(prev_keypoints[matches[i].queryIdx].pt);
            curr_keys.push_back(curr_keypoints[matches[i].trainIdx].pt);
        }

        Mat T = findHomography(prev_keys, curr_keys, LMEDS);

        prev_trackwindow.x += cvRound(T.at<double> (0, 2));
        prev_trackwindow.y += cvRound(T.at<double> (1, 2));
    }

    prev_center.x = prev_trackwindow.x;
    prev_center.y = prev_trackwindow.y;
    prev_image = image;
    return prev_trackwindow;
}

Rect CvFeatureTracker::updateTrackingWindowWithFlow(Mat image)
{
    ittr++;
    Size subPixWinSize(10,10), winSize(31,31);
    Mat image_bw;
    TermCriteria termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.03);
    std::vector<uchar> status;
    std::vector<float> err;

    cvtColor(image, image_bw, COLOR_BGR2GRAY);
    cvtColor(prev_image, prev_image_bw, COLOR_BGR2GRAY);

    if (ittr == 1)
    {
        Mat mask = Mat::zeros(image.size(), CV_8UC1);
        rectangle(mask, Point(prev_trackwindow.x, prev_trackwindow.y), Point(
                prev_trackwindow.x + prev_trackwindow.width, prev_trackwindow.y
                + prev_trackwindow.height), Scalar(255), CV_FILLED);
        goodFeaturesToTrack(image_bw, features[1], 500, 0.01, 20, mask, 3, 0, 0.04);
        cornerSubPix(image_bw, features[1], subPixWinSize, Size(-1, -1), termcrit);
    }
    else
    {
        calcOpticalFlowPyrLK(prev_image_bw, image_bw, features[0], features[1],
                status, err, winSize, 3, termcrit);

        Point2f feature0_center(0, 0);
        Point2f feature1_center(0, 0);
        int goodtracks = 0;
        for (int i = 0; i < (int)features[1].size(); i++)
        {
            if (status[i] == 1)
            {
                feature0_center.x += features[0][i].x;
                feature0_center.y += features[0][i].y;
                feature1_center.x += features[1][i].x;
                feature1_center.y += features[1][i].y;
                goodtracks++;
            }
        }

        if (goodtracks > 0) // guard against division by zero when all tracks are lost
        {
            feature0_center.x /= goodtracks;
            feature0_center.y /= goodtracks;
            feature1_center.x /= goodtracks;
            feature1_center.y /= goodtracks;

            prev_center.x += (feature1_center.x - feature0_center.x);
            prev_center.y += (feature1_center.y - feature0_center.y);

            prev_trackwindow.x = (int)prev_center.x;
            prev_trackwindow.y = (int)prev_center.y;
        }
    }

    swap(features[0], features[1]);
    image.copyTo(prev_image);
    return prev_trackwindow;
}

void CvFeatureTracker::setTrackingWindow(Rect _window)
{
    prev_trackwindow = _window;
}

Rect CvFeatureTracker::getTrackingWindow()
{
    return prev_trackwindow;
}

Point2f CvFeatureTracker::getTrackingCenter()
{
    Point2f center(0, 0);
    center.x = (float)(prev_center.x + prev_trackwindow.width/2.0);
    center.y = (float)(prev_center.y + prev_trackwindow.height/2.0);
    return center;
}
@ -1,721 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install, copy or use the software.
//
// Copyright (C) 2009, Farhad Dadgostar
// Intel Corporation and third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencv2/contrib/compat.hpp"

CvFuzzyPoint::CvFuzzyPoint(double _x, double _y)
{
    x = _x;
    y = _y;
}

bool CvFuzzyCurve::between(double x, double x1, double x2)
{
    if ((x >= x1) && (x <= x2))
        return true;
    else if ((x >= x2) && (x <= x1))
        return true;

    return false;
}

CvFuzzyCurve::CvFuzzyCurve()
{
    value = 0;
}

CvFuzzyCurve::~CvFuzzyCurve()
{
    // nothing to do
}

void CvFuzzyCurve::setCentre(double _centre)
{
    centre = _centre;
}

double CvFuzzyCurve::getCentre()
{
    return centre;
}

void CvFuzzyCurve::clear()
{
    points.clear();
}

void CvFuzzyCurve::addPoint(double x, double y)
{
    points.push_back(CvFuzzyPoint(x, y));
}

double CvFuzzyCurve::calcValue(double param)
{
    int size = (int)points.size();
    double x1, y1, x2, y2, m, y;
    for (int i = 1; i < size; i++)
    {
        x1 = points[i-1].x;
        x2 = points[i].x;
        if (between(param, x1, x2)) {
            y1 = points[i-1].y;
            y2 = points[i].y;
            if (x2 == x1)
                return y2;
            m = (y2-y1)/(x2-x1);
            y = m*(param-x1)+y1;
            return y;
        }
    }
    return 0;
}
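calcValue implements piecewise-linear membership over the polyline built by addPoint. Here is a self-contained re-implementation sketch of the same evaluation (not the class itself), with a worked triangle-shaped curve:

// Standalone sketch: membership is interpolated between consecutive
// points of the polyline and is 0 outside it.
#include <vector>
#include <cstdio>

struct Pt { double x, y; };

static double membership(const std::vector<Pt>& pts, double v)
{
    for (size_t i = 1; i < pts.size(); i++) {
        double x1 = pts[i-1].x, x2 = pts[i].x;
        if ((v >= x1 && v <= x2) || (v >= x2 && v <= x1)) {
            if (x1 == x2) return pts[i].y;
            double m = (pts[i].y - pts[i-1].y) / (x2 - x1);
            return m * (v - x1) + pts[i-1].y;
        }
    }
    return 0.0; // outside the curve
}

int main()
{
    // Triangle membership peaking at 0.5, as addPoint() would build it.
    std::vector<Pt> tri;
    tri.push_back(Pt()); tri[0].x = 0.0; tri[0].y = 0.0;
    tri.push_back(Pt()); tri[1].x = 0.5; tri[1].y = 1.0;
    tri.push_back(Pt()); tri[2].x = 1.0; tri[2].y = 0.0;
    std::printf("%.2f %.2f %.2f\n",
                membership(tri, 0.25),   // 0.50
                membership(tri, 0.50),   // 1.00
                membership(tri, 2.00));  // 0.00 (outside)
    return 0;
}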

double CvFuzzyCurve::getValue()
{
    return value;
}

void CvFuzzyCurve::setValue(double _value)
{
    value = _value;
}


CvFuzzyFunction::CvFuzzyFunction()
{
    // nothing to do
}

CvFuzzyFunction::~CvFuzzyFunction()
{
    curves.clear();
}

void CvFuzzyFunction::addCurve(CvFuzzyCurve *curve, double value)
{
    // set the value before the curve is copied into the list,
    // so that the stored copy actually carries it
    curve->setValue(value);
    curves.push_back(*curve);
}

void CvFuzzyFunction::resetValues()
{
    int numCurves = (int)curves.size();
    for (int i = 0; i < numCurves; i++)
        curves[i].setValue(0);
}

double CvFuzzyFunction::calcValue()
{
    double s1 = 0, s2 = 0, v;
    int numCurves = (int)curves.size();
    for (int i = 0; i < numCurves; i++)
    {
        v = curves[i].getValue();
        s1 += curves[i].getCentre() * v;
        s2 += v;
    }

    if (s2 != 0)
        return s1/s2;
    else
        return 0;
}

CvFuzzyCurve *CvFuzzyFunction::newCurve()
{
    CvFuzzyCurve *c;
    c = new CvFuzzyCurve();
    addCurve(c);
    return c;
}

CvFuzzyRule::CvFuzzyRule()
{
    fuzzyInput1 = NULL;
    fuzzyInput2 = NULL;
    fuzzyOutput = NULL;
}

CvFuzzyRule::~CvFuzzyRule()
{
    if (fuzzyInput1 != NULL)
        delete fuzzyInput1;

    if (fuzzyInput2 != NULL)
        delete fuzzyInput2;

    if (fuzzyOutput != NULL)
        delete fuzzyOutput;
}

void CvFuzzyRule::setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1)
{
    fuzzyInput1 = c1;
    fuzzyInput2 = c2;
    fuzzyOutput = o1;
}

double CvFuzzyRule::calcValue(double param1, double param2)
{
    double v1, v2;
    v1 = fuzzyInput1->calcValue(param1);
    if (fuzzyInput2 != NULL)
    {
        v2 = fuzzyInput2->calcValue(param2);
        if (v1 < v2)
            return v1;
        else
            return v2;
    }
    else
        return v1;
}

CvFuzzyCurve *CvFuzzyRule::getOutputCurve()
{
    return fuzzyOutput;
}

CvFuzzyController::CvFuzzyController()
{
    // nothing to do
}

CvFuzzyController::~CvFuzzyController()
{
    int size = (int)rules.size();
    for(int i = 0; i < size; i++)
        delete rules[i];
}

void CvFuzzyController::addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1)
{
    CvFuzzyRule *f = new CvFuzzyRule();
    rules.push_back(f);
    f->setRule(c1, c2, o1);
}

double CvFuzzyController::calcOutput(double param1, double param2)
{
    double v;
    CvFuzzyFunction list;
    int size = (int)rules.size();

    for(int i = 0; i < size; i++)
    {
        v = rules[i]->calcValue(param1, param2);
        if (v != 0)
            list.addCurve(rules[i]->getOutputCurve(), v);
    }
    v = list.calcValue();
    return v;
}
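calcOutput defuzzifies by centre of gravity over the rule outputs that fired: output = sum(centre_i * strength_i) / sum(strength_i). A purely numeric sketch with made-up centres and firing strengths:

// Centre-of-gravity defuzzification, mirroring CvFuzzyFunction::calcValue().
#include <cstdio>

int main()
{
    double centres[]   = { -5.0, 0.0, 5.0 };  // shrink / keep / expand
    double strengths[] = {  0.2, 0.7, 0.1 };  // firing degrees of 3 rules
    double s1 = 0, s2 = 0;
    for (int i = 0; i < 3; i++) { s1 += centres[i] * strengths[i]; s2 += strengths[i]; }
    std::printf("output = %.3f\n", s2 != 0 ? s1 / s2 : 0.0); // -0.500
    return 0;
}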

CvFuzzyMeanShiftTracker::FuzzyResizer::FuzzyResizer()
{
    CvFuzzyCurve *i1L, *i1M, *i1H;
    CvFuzzyCurve *oS, *oZE, *oE;
    CvFuzzyCurve *c;

    double MedStart = 0.1, MedWidth = 0.15;

    c = iInput.newCurve();
    c->addPoint(0, 1);
    c->addPoint(0.1, 0);
    c->setCentre(0);
    i1L = c;

    c = iInput.newCurve();
    c->addPoint(0.05, 0);
    c->addPoint(MedStart, 1);
    c->addPoint(MedStart+MedWidth, 1);
    c->addPoint(MedStart+MedWidth+0.05, 0);
    c->setCentre(MedStart+(MedWidth/2));
    i1M = c;

    c = iInput.newCurve();
    c->addPoint(MedStart+MedWidth, 0);
    c->addPoint(1, 1);
    c->addPoint(1000, 1);
    c->setCentre(1);
    i1H = c;

    c = iOutput.newCurve();
    c->addPoint(-10000, 1);
    c->addPoint(-5, 1);
    c->addPoint(-0.5, 0);
    c->setCentre(-5);
    oS = c;

    c = iOutput.newCurve();
    c->addPoint(-1, 0);
    c->addPoint(-0.05, 1);
    c->addPoint(0.05, 1);
    c->addPoint(1, 0);
    c->setCentre(0);
    oZE = c;

    c = iOutput.newCurve();
    c->addPoint(-0.5, 0);
    c->addPoint(5, 1);
    c->addPoint(1000, 1);
    c->setCentre(5);
    oE = c;

    fuzzyController.addRule(i1L, NULL, oS);
    fuzzyController.addRule(i1M, NULL, oZE);
    fuzzyController.addRule(i1H, NULL, oE);
}

int CvFuzzyMeanShiftTracker::FuzzyResizer::calcOutput(double edgeDensity, double density)
{
    return (int)fuzzyController.calcOutput(edgeDensity, density);
}

CvFuzzyMeanShiftTracker::SearchWindow::SearchWindow()
{
    x = 0;
    y = 0;
    width = 0;
    height = 0;
    maxWidth = 0;
    maxHeight = 0;
    xGc = 0;
    yGc = 0;
    m00 = 0;
    m01 = 0;
    m10 = 0;
    m11 = 0;
    m02 = 0;
    m20 = 0;
    ellipseHeight = 0;
    ellipseWidth = 0;
    ellipseAngle = 0;
    density = 0;
    depthLow = 0;
    depthHigh = 0;
    fuzzyResizer = NULL;
}

CvFuzzyMeanShiftTracker::SearchWindow::~SearchWindow()
{
    if (fuzzyResizer != NULL)
        delete fuzzyResizer;
}

void CvFuzzyMeanShiftTracker::SearchWindow::setSize(int _x, int _y, int _width, int _height)
{
    x = _x;
    y = _y;
    width = _width;
    height = _height;

    if (x < 0)
        x = 0;

    if (y < 0)
        y = 0;

    if (x + width > maxWidth)
        width = maxWidth - x;

    if (y + height > maxHeight)
        height = maxHeight - y;
}

void CvFuzzyMeanShiftTracker::SearchWindow::initDepthValues(IplImage *maskImage, IplImage *depthMap)
{
    unsigned int d=0, mind = 0xFFFF, maxd = 0, m0 = 0, m1 = 0, mc, dd;
    unsigned char *data = NULL;
    unsigned short *depthData = NULL;

    for (int j = 0; j < height; j++)
    {
        data = (unsigned char *)(maskImage->imageData + (maskImage->widthStep * (j + y)) + x);
        if (depthMap)
            depthData = (unsigned short *)(depthMap->imageData + (depthMap->widthStep * (j + y)) + x);

        for (int i = 0; i < width; i++)
        {
            if (*data)
            {
                m0 += 1;

                if (depthData)
                {
                    if (*depthData)
                    {
                        d = *depthData;
                        m1 += d;
                        if (d < mind)
                            mind = d;
                        if (d > maxd)
                            maxd = d;
                    }
                    depthData++;
                }
            }
            data++;
        }
    }

    if (m0 > 0)
    {
        mc = m1/m0;
        if ((mc - mind) > (maxd - mc))
            dd = maxd - mc;
        else
            dd = mc - mind;
        dd = dd - dd/10;
        depthHigh = mc + dd;
        depthLow = mc - dd;
    }
    else
    {
        depthHigh = 32000;
        depthLow = 0;
    }
}

bool CvFuzzyMeanShiftTracker::SearchWindow::shift()
{
    if ((xGc != (width/2)) || (yGc != (height/2)))
    {
        setSize(x + (xGc-(width/2)), y + (yGc-(height/2)), width, height);
        return true;
    }
    else
    {
        return false;
    }
}

void CvFuzzyMeanShiftTracker::SearchWindow::extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth)
{
    m00 = 0;
    m10 = 0;
    m01 = 0;
    m11 = 0;
    density = 0;
    m02 = 0;
    m20 = 0;
    ellipseHeight = 0;
    ellipseWidth = 0;

    maxWidth = maskImage->width;
    maxHeight = maskImage->height;

    if (initDepth)
        initDepthValues(maskImage, depthMap);

    unsigned char *maskData = NULL;
    unsigned short *depthData = NULL, depth;
    bool isOk;
    unsigned long count;

    verticalEdgeLeft = 0;
    verticalEdgeRight = 0;
    horizontalEdgeTop = 0;
    horizontalEdgeBottom = 0;

    for (int j = 0; j < height; j++)
    {
        maskData = (unsigned char *)(maskImage->imageData + (maskImage->widthStep * (j + y)) + x);
        if (depthMap)
            depthData = (unsigned short *)(depthMap->imageData + (depthMap->widthStep * (j + y)) + x);

        count = 0;
        for (int i = 0; i < width; i++)
        {
            if (*maskData)
            {
                isOk = true;
                if (depthData)
                {
                    depth = (*depthData);
                    if ((depth > depthHigh) || (depth < depthLow))
                        isOk = false;

                    depthData++;
                }

                if (isOk)
                {
                    m00++;
                    m01 += j;
                    m10 += i;
                    m02 += (j * j);
                    m20 += (i * i);
                    m11 += (j * i);

                    if (i == 0)
                        verticalEdgeLeft++;
                    else if (i == width-1)
                        verticalEdgeRight++;
                    else if (j == 0)
                        horizontalEdgeTop++;
                    else if (j == height-1)
                        horizontalEdgeBottom++;

                    count++;
                }
            }
            maskData++;
        }
    }

    if (m00 > 0)
    {
        xGc = (m10 / m00);
        yGc = (m01 / m00);

        double a, b, c, e1, e2, e3;
        a = ((double)m20/(double)m00)-(xGc * xGc);
        b = 2*(((double)m11/(double)m00)-(xGc * yGc));
        c = ((double)m02/(double)m00)-(yGc * yGc);
        e1 = a+c;
        e3 = a-c;
        e2 = sqrt((b*b)+(e3*e3));
        ellipseHeight = int(sqrt(0.5*(e1+e2)));
        ellipseWidth = int(sqrt(0.5*(e1-e2)));
        if (e3 == 0)
            ellipseAngle = 0;
        else
            ellipseAngle = 0.5*atan(b/e3);

        density = (double)m00/(double)(width * height);
    }
    else
    {
        xGc = width / 2;
        yGc = height / 2;
        ellipseHeight = 0;
        ellipseWidth = 0;
        ellipseAngle = 0;
        density = 0;
    }
}

void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh) {
    int x1 = horizontalEdgeTop;
    int x2 = horizontalEdgeBottom;
    int y1 = verticalEdgeLeft;
    int y2 = verticalEdgeRight;
    int gx = (width*2)/5;
    int gy = (height*2)/5;
    int lx = width/10;
    int ly = height/10;

    resizeDy = 0;
    resizeDh = 0;
    resizeDx = 0;
    resizeDw = 0;

    if (x1 > gx) {
        resizeDy = -1;
    } else if (x1 < lx) {
        resizeDy = +1;
    }

    if (x2 > gx) {
        resizeDh = resizeDy + 1;
    } else if (x2 < lx) {
        resizeDh = - (resizeDy + 1);
    } else {
        resizeDh = - resizeDy;
    }

    if (y1 > gy) {
        resizeDx = -1;
    } else if (y1 < ly) {
        resizeDx = +1;
    }

    if (y2 > gy) {
        resizeDw = resizeDx + 1;
    } else if (y2 < ly) {
        resizeDw = - (resizeDx + 1);
    } else {
        resizeDw = - resizeDx;
    }
}

void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh)
{
    int newWidth, newHeight, dx, dy;
    double px, py;
    newWidth = int(sqrt(double(m00)*1.3));
    newHeight = int(newWidth*1.2);
    dx = (newWidth - width);
    dy = (newHeight - height);
    px = (double)xGc/(double)width;
    py = (double)yGc/(double)height;
    resizeDx = (int)(px*dx);
    resizeDy = (int)(py*dy);
    resizeDw = (int)((1-px)*dx);
    resizeDh = (int)((1-py)*dy);
}

void CvFuzzyMeanShiftTracker::SearchWindow::getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh)
{
    double dx1, dx2, dy1, dy2;

    resizeDy = 0;
    resizeDh = 0;
    resizeDx = 0;
    resizeDw = 0;

    if (fuzzyResizer == NULL)
        fuzzyResizer = new FuzzyResizer();

    // The original version computed these values twice and contained a
    // dead no-op branch before the effective assignments below; the
    // redundant first pass has been removed without changing behavior.
    dx1 = fuzzyResizer->calcOutput(double(verticalEdgeLeft)/double(height), density);
    dx2 = fuzzyResizer->calcOutput(double(verticalEdgeRight)/double(height), density);
    resizeDx = int(-dx1);
    resizeDw = int(dx1+dx2);

    dy1 = fuzzyResizer->calcOutput(double(horizontalEdgeTop)/double(width), density);
    dy2 = fuzzyResizer->calcOutput(double(horizontalEdgeBottom)/double(width), density);
    resizeDy = int(-dy1);
    resizeDh = int(dy1+dy2);
}

bool CvFuzzyMeanShiftTracker::SearchWindow::meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth)
{
    numShifts = 0;
    do
    {
        extractInfo(maskImage, depthMap, initDepth);
        if (! shift())
            return true;
    } while (++numShifts < maxIteration);

    return false;
}

void CvFuzzyMeanShiftTracker::findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth)
{
    int resizeDx, resizeDy, resizeDw, resizeDh;
    resizeDx = 0;
    resizeDy = 0;
    resizeDw = 0;
    resizeDh = 0;
    searchWindow.numIters = 0;
    for (int i = 0; i < maxIteration; i++)
    {
        searchWindow.numIters++;
        searchWindow.meanShift(maskImage, depthMap, MaxMeanShiftIteration, initDepth);
        switch (resizeMethod)
        {
        case rmEdgeDensityLinear :
            searchWindow.getResizeAttribsEdgeDensityLinear(resizeDx, resizeDy, resizeDw, resizeDh);
            break;
        case rmEdgeDensityFuzzy :
            //searchWindow.getResizeAttribsEdgeDensityLinear(resizeDx, resizeDy, resizeDw, resizeDh);
            searchWindow.getResizeAttribsEdgeDensityFuzzy(resizeDx, resizeDy, resizeDw, resizeDh);
            break;
        case rmInnerDensity :
            searchWindow.getResizeAttribsInnerDensity(resizeDx, resizeDy, resizeDw, resizeDh);
            break;
        default:
            searchWindow.getResizeAttribsEdgeDensityLinear(resizeDx, resizeDy, resizeDw, resizeDh);
        }

        searchWindow.ldx = resizeDx;
        searchWindow.ldy = resizeDy;
        searchWindow.ldw = resizeDw;
        searchWindow.ldh = resizeDh;

        if ((resizeDx == 0) && (resizeDy == 0) && (resizeDw == 0) && (resizeDh == 0))
            break;

        searchWindow.setSize(searchWindow.x + resizeDx, searchWindow.y + resizeDy, searchWindow.width + resizeDw, searchWindow.height + resizeDh);
    }
}

CvFuzzyMeanShiftTracker::CvFuzzyMeanShiftTracker()
{
    searchMode = tsSetWindow;
}

CvFuzzyMeanShiftTracker::~CvFuzzyMeanShiftTracker()
{
    // nothing to do
}

void CvFuzzyMeanShiftTracker::track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass)
{
    bool initDepth = false;

    if (resetSearch)
        searchMode = tsSetWindow;

    switch (searchMode)
    {
    case tsDisabled:
        return;
    case tsSearching:
        return;
    case tsSetWindow:
        kernel.maxWidth = maskImage->width;
        kernel.maxHeight = maskImage->height;
        kernel.setSize(0, 0, maskImage->width, maskImage->height);
        initDepth = true;
        // intentional fall-through to tsTracking
    case tsTracking:
        searchMode = tsSearching;
        findOptimumSearchWindow(kernel, maskImage, depthMap, MaxSetSizeIteration, resizeMethod, initDepth);
        if ((kernel.density == 0) || (kernel.m00 < minKernelMass))
            searchMode = tsSetWindow;
        else
            searchMode = tsTracking;
    }
}
@ -1,139 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"

#include <iostream>

using namespace cv;

static void downsamplePoints( const Mat& src, Mat& dst, size_t count )
{
    CV_Assert( count >= 2 );
    CV_Assert( src.cols == 1 || src.rows == 1 );
    CV_Assert( src.total() >= count );
    CV_Assert( src.type() == CV_8UC3 );

    dst.create( 1, (int)count, CV_8UC3 );
    // TODO: optimize by exploiting symmetry in the distance matrix
    Mat dists( (int)src.total(), (int)src.total(), CV_32FC1, Scalar(0) );
    if( dists.empty() )
        std::cerr << "Such a big matrix can't be created." << std::endl;

    for( int i = 0; i < dists.rows; i++ )
    {
        for( int j = i; j < dists.cols; j++ )
        {
            float dist = (float)norm(src.at<Point3_<uchar> >(i) - src.at<Point3_<uchar> >(j));
            dists.at<float>(j, i) = dists.at<float>(i, j) = dist;
        }
    }

    double maxVal;
    Point maxLoc;
    minMaxLoc(dists, 0, &maxVal, 0, &maxLoc);

    dst.at<Point3_<uchar> >(0) = src.at<Point3_<uchar> >(maxLoc.x);
    dst.at<Point3_<uchar> >(1) = src.at<Point3_<uchar> >(maxLoc.y);

    Mat activedDists( 0, dists.cols, dists.type() );
    Mat candidatePointsMask( 1, dists.cols, CV_8UC1, Scalar(255) );
    activedDists.push_back( dists.row(maxLoc.y) );
    candidatePointsMask.at<uchar>(0, maxLoc.y) = 0;

    for( size_t i = 2; i < count; i++ )
    {
        activedDists.push_back(dists.row(maxLoc.x));
        candidatePointsMask.at<uchar>(0, maxLoc.x) = 0;

        Mat minDists;
        reduce( activedDists, minDists, 0, REDUCE_MIN );
        minMaxLoc( minDists, 0, &maxVal, 0, &maxLoc, candidatePointsMask );
        dst.at<Point3_<uchar> >((int)i) = src.at<Point3_<uchar> >(maxLoc.x);
    }
}
|
||||
|
||||
void cv::generateColors( std::vector<Scalar>& colors, size_t count, size_t factor )
|
||||
{
|
||||
if( count < 1 )
|
||||
return;
|
||||
|
||||
colors.resize(count);
|
||||
|
||||
if( count == 1 )
|
||||
{
|
||||
colors[0] = Scalar(0,0,255); // red
|
||||
return;
|
||||
}
|
||||
if( count == 2 )
|
||||
{
|
||||
colors[0] = Scalar(0,0,255); // red
|
||||
colors[1] = Scalar(0,255,0); // green
|
||||
return;
|
||||
}
|
||||
|
||||
// Generate a set of colors in RGB space. A size of the set is severel times (=factor) larger then
|
||||
// the needed count of colors.
|
||||
Mat bgr( 1, (int)(count*factor), CV_8UC3 );
|
||||
randu( bgr, 0, 256 );
|
||||
|
||||
// Convert the colors set to Lab space.
|
||||
// Distances between colors in this space correspond a human perception.
|
||||
Mat lab;
|
||||
cvtColor( bgr, lab, COLOR_BGR2Lab);
|
||||
|
||||
// Subsample colors from the generated set so that
|
||||
// to maximize the minimum distances between each other.
|
||||
// Douglas-Peucker algorithm is used for this.
|
||||
Mat lab_subset;
|
||||
downsamplePoints( lab, lab_subset, count );
|
||||
|
||||
// Convert subsampled colors back to RGB
|
||||
Mat bgr_subset;
|
||||
cvtColor( lab_subset, bgr_subset, COLOR_Lab2BGR );
|
||||
|
||||
CV_Assert( bgr_subset.total() == count );
|
||||
for( size_t i = 0; i < count; i++ )
|
||||
{
|
||||
Point3_<uchar> c = bgr_subset.at<Point3_<uchar> >((int)i);
|
||||
colors[i] = Scalar(c.x, c.y, c.z);
|
||||
}
|
||||
}
|
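For reviewers who want to check downstream users of cv::generateColors before it moves to opencv_contrib, a minimal usage sketch (mine, not part of the diff; it assumes the pre-removal `opencv2/contrib.hpp` header is still on the include path):

#include "opencv2/contrib.hpp"

#include <vector>

int main()
{
    // Ask for 8 well-separated BGR colors; the pool of random candidates is
    // count*factor = 8*100 samples, subsampled by the maximin rule above.
    std::vector<cv::Scalar> palette;
    cv::generateColors(palette, 8, 100);
    return palette.size() == 8 ? 0 : 1;
}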
@ -1,235 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/contrib/hybridtracker.hpp"

using namespace cv;

CvHybridTrackerParams::CvHybridTrackerParams(float _ft_tracker_weight, float _ms_tracker_weight,
        CvFeatureTrackerParams _ft_params,
        CvMeanShiftTrackerParams _ms_params,
        CvMotionModel)
{
    ft_tracker_weight = _ft_tracker_weight;
    ms_tracker_weight = _ms_tracker_weight;
    ft_params = _ft_params;
    ms_params = _ms_params;
}

CvMeanShiftTrackerParams::CvMeanShiftTrackerParams(int _tracking_type, CvTermCriteria _term_crit)
{
    tracking_type = _tracking_type;
    term_crit = _term_crit;
}

CvHybridTracker::CvHybridTracker() {

}

CvHybridTracker::CvHybridTracker(HybridTrackerParams _params) :
    params(_params) {
    params.ft_params.feature_type = CvFeatureTrackerParams::SIFT;
    mstracker = new CvMeanShiftTracker(params.ms_params);
    fttracker = new CvFeatureTracker(params.ft_params);
}

CvHybridTracker::~CvHybridTracker() {
    if (mstracker != NULL)
        delete mstracker;
    if (fttracker != NULL)
        delete fttracker;
}

inline float CvHybridTracker::getL2Norm(Point2f p1, Point2f p2) {
    float distance = (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y);
    return std::sqrt(distance);
}

Mat CvHybridTracker::getDistanceProjection(Mat image, Point2f center) {
    Mat hist(image.size(), CV_64F);

    double lu = getL2Norm(Point(0, 0), center);
    double ru = getL2Norm(Point(0, image.size().width), center);
    double rd = getL2Norm(Point(image.size().height, image.size().width), center);
    double ld = getL2Norm(Point(image.size().height, 0), center);

    double max = (lu < ru) ? lu : ru;
    max = (max < rd) ? max : rd;
    max = (max < ld) ? max : ld;

    for (int i = 0; i < hist.rows; i++)
        for (int j = 0; j < hist.cols; j++)
            hist.at<double>(i, j) = 1.0 - (getL2Norm(Point(i, j), center) / max);

    return hist;
}

Mat CvHybridTracker::getGaussianProjection(Mat image, int ksize, double sigma, Point2f center) {
    Mat kernel = getGaussianKernel(ksize, sigma, CV_64F);
    double max = kernel.at<double>(ksize / 2);

    Mat hist(image.size(), CV_64F);
    for (int i = 0; i < hist.rows; i++)
        for (int j = 0; j < hist.cols; j++) {
            int pos = cvRound(getL2Norm(Point(i, j), center));
            if (pos < ksize / 2.0)
                hist.at<double>(i, j) = 1.0 - (kernel.at<double>(pos) / max);
        }

    return hist;
}

void CvHybridTracker::newTracker(Mat image, Rect selection) {
    prev_proj = Mat::zeros(image.size(), CV_64FC1);
    prev_center = Point2f(selection.x + selection.width / 2.0f, selection.y + selection.height / 2.0f);
    prev_window = selection;

    mstracker->newTrackingWindow(image, selection);
    fttracker->newTrackingWindow(image, selection);

    samples = cvCreateMat(2, 1, CV_32FC1);
    labels = cvCreateMat(2, 1, CV_32SC1);

    ittr = 0;
}

void CvHybridTracker::updateTracker(Mat image) {
    ittr++;

    //copy over clean images: TODO
    mstracker->updateTrackingWindow(image);
    fttracker->updateTrackingWindowWithFlow(image);

    if (params.motion_model == CvMotionModel::EM)
        updateTrackerWithEM(image);
    else
        updateTrackerWithLowPassFilter(image);

    // Regression to find new weights
    Point2f ms_center = mstracker->getTrackingEllipse().center;
    Point2f ft_center = fttracker->getTrackingCenter();

#ifdef DEBUG_HYTRACKER
    circle(image, ms_center, 3, Scalar(0, 0, 255), -1, 8);
    circle(image, ft_center, 3, Scalar(255, 0, 0), -1, 8);
    putText(image, "ms", Point(ms_center.x+2, ms_center.y), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
    putText(image, "ft", Point(ft_center.x+2, ft_center.y), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
#endif

    double ms_len = getL2Norm(ms_center, curr_center);
    double ft_len = getL2Norm(ft_center, curr_center);
    double total_len = ms_len + ft_len;

    params.ms_tracker_weight *= (ittr - 1);
    params.ms_tracker_weight += (float)((ms_len / total_len));
    params.ms_tracker_weight /= ittr;
    params.ft_tracker_weight *= (ittr - 1);
    params.ft_tracker_weight += (float)((ft_len / total_len));
    params.ft_tracker_weight /= ittr;

    circle(image, prev_center, 3, Scalar(0, 0, 0), -1, 8);
    circle(image, curr_center, 3, Scalar(255, 255, 255), -1, 8);

    prev_center = curr_center;
    prev_window.x = (int)(curr_center.x-prev_window.width/2.0);
    prev_window.y = (int)(curr_center.y-prev_window.height/2.0);

    mstracker->setTrackingWindow(prev_window);
    fttracker->setTrackingWindow(prev_window);
}

void CvHybridTracker::updateTrackerWithEM(Mat image) {
    Mat ms_backproj = mstracker->getHistogramProjection(CV_64F);
    Mat ms_distproj = getDistanceProjection(image, mstracker->getTrackingCenter());
    Mat ms_proj = ms_backproj.mul(ms_distproj);

    float dist_err = getL2Norm(mstracker->getTrackingCenter(), fttracker->getTrackingCenter());
    Mat ft_gaussproj = getGaussianProjection(image, cvRound(dist_err), -1, fttracker->getTrackingCenter());
    Mat ft_distproj = getDistanceProjection(image, fttracker->getTrackingCenter());
    Mat ft_proj = ft_gaussproj.mul(ft_distproj);

    Mat proj = params.ms_tracker_weight * ms_proj + params.ft_tracker_weight * ft_proj + prev_proj;

    int sample_count = countNonZero(proj);
    cvReleaseMat(&samples);
    cvReleaseMat(&labels);
    samples = cvCreateMat(sample_count, 2, CV_32FC1);
    labels = cvCreateMat(sample_count, 1, CV_32SC1);

    int count = 0;
    for (int i = 0; i < proj.rows; i++)
        for (int j = 0; j < proj.cols; j++)
            if (proj.at<double>(i, j) > 0) {
                samples->data.fl[count * 2] = (float)i;
                samples->data.fl[count * 2 + 1] = (float)j;
                count++;
            }

    cv::Mat lbls;

    EM em_model(1, EM::COV_MAT_SPHERICAL, TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 10000, 0.001));
    em_model.train(cvarrToMat(samples), noArray(), lbls);
    if(labels)
        lbls.copyTo(cvarrToMat(labels));

    Mat em_means = em_model.get<Mat>("means");
    curr_center.x = (float)em_means.at<float>(0, 0);
    curr_center.y = (float)em_means.at<float>(0, 1);
}

void CvHybridTracker::updateTrackerWithLowPassFilter(Mat) {
    RotatedRect ms_track = mstracker->getTrackingEllipse();
    Point2f ft_center = fttracker->getTrackingCenter();

    float a = params.low_pass_gain;
    curr_center.x = (1 - a) * prev_center.x + a * (params.ms_tracker_weight * ms_track.center.x + params.ft_tracker_weight * ft_center.x);
    curr_center.y = (1 - a) * prev_center.y + a * (params.ms_tracker_weight * ms_track.center.y + params.ft_tracker_weight * ft_center.y);
}

Rect CvHybridTracker::getTrackingWindow() {
    return prev_window;
}
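The low-pass branch above is a plain exponential moving average over the two trackers' center hypotheses. A self-contained restatement of that one step (my sketch; the function name and parameters are hypothetical, only the formula comes from the code above):

#include "opencv2/core.hpp"

// curr = (1 - gain) * prev + gain * (w_ms * ms + w_ft * ft), applied per axis.
static cv::Point2f fuseCenters(cv::Point2f prev, cv::Point2f ms, cv::Point2f ft,
                               float w_ms, float w_ft, float gain)
{
    cv::Point2f blended(w_ms * ms.x + w_ft * ft.x,
                        w_ms * ms.y + w_ft * ft.y);
    return cv::Point2f((1.f - gain) * prev.x + gain * blended.x,
                       (1.f - gain) * prev.y + gain * blended.y);
}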
@ -1,204 +0,0 @@
#include "opencv2/contrib.hpp"
#include "cvconfig.h"

#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#include <tchar.h>
#else
#include <dirent.h>
#endif

namespace cv
{
std::vector<String> Directory::GetListFiles( const String& path, const String & exten, bool addPath )
{
    std::vector<String> list;
    list.clear();
    String path_f = path + "/" + exten;
#ifdef WIN32
#ifdef HAVE_WINRT
    WIN32_FIND_DATAW FindFileData;
#else
    WIN32_FIND_DATAA FindFileData;
#endif
    HANDLE hFind;

#ifdef HAVE_WINRT
    wchar_t wpath[MAX_PATH];
    size_t copied = mbstowcs(wpath, path_f.c_str(), MAX_PATH);
    CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1));
    hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0);
#else
    hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData);
#endif
    if (hFind == INVALID_HANDLE_VALUE)
    {
        return list;
    }
    else
    {
        do
        {
            if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_NORMAL ||
                FindFileData.dwFileAttributes == FILE_ATTRIBUTE_ARCHIVE ||
                FindFileData.dwFileAttributes == FILE_ATTRIBUTE_HIDDEN ||
                FindFileData.dwFileAttributes == FILE_ATTRIBUTE_SYSTEM ||
                FindFileData.dwFileAttributes == FILE_ATTRIBUTE_READONLY)
            {
                char* fname;
#ifdef HAVE_WINRT
                char fname_tmp[MAX_PATH] = {0};
                size_t copied = wcstombs(fname_tmp, FindFileData.cFileName, MAX_PATH);
                CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1));
                fname = fname_tmp;
#else
                fname = FindFileData.cFileName;
#endif
                if (addPath)
                    list.push_back(path + "/" + String(fname));
                else
                    list.push_back(String(fname));
            }
        }
#ifdef HAVE_WINRT
        while(FindNextFileW(hFind, &FindFileData));
#else
        while(FindNextFileA(hFind, &FindFileData));
#endif
        FindClose(hFind);
    }
#else
    (void)addPath;
    DIR *dp;
    struct dirent *dirp;
    if((dp = opendir(path.c_str())) == NULL)
    {
        return list;
    }

    while ((dirp = readdir(dp)) != NULL)
    {
        if (dirp->d_type == DT_REG)
        {
            if (exten.compare("*") == 0)
                list.push_back(static_cast<String>(dirp->d_name));
            else
                if (String(dirp->d_name).find(exten) != String::npos)
                    list.push_back(static_cast<String>(dirp->d_name));
        }
    }
    closedir(dp);
#endif

    return list;
}

std::vector<String> Directory::GetListFolders( const String& path, const String & exten, bool addPath )
{
    std::vector<String> list;
    String path_f = path + "/" + exten;
    list.clear();
#ifdef WIN32
#ifdef HAVE_WINRT
    WIN32_FIND_DATAW FindFileData;
#else
    WIN32_FIND_DATAA FindFileData;
#endif
    HANDLE hFind;

#ifdef HAVE_WINRT
    wchar_t wpath [MAX_PATH];
    size_t copied = mbstowcs(wpath, path_f.c_str(), path_f.size());
    CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1));

    hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0);
#else
    hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData);
#endif
    if (hFind == INVALID_HANDLE_VALUE)
    {
        return list;
    }
    else
    {
        do
        {
#ifdef HAVE_WINRT
            if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY &&
                wcscmp(FindFileData.cFileName, L".") != 0 &&
                wcscmp(FindFileData.cFileName, L"..") != 0)
#else
            if (FindFileData.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY &&
                strcmp(FindFileData.cFileName, ".") != 0 &&
                strcmp(FindFileData.cFileName, "..") != 0)
#endif
            {
                char* fname;
#ifdef HAVE_WINRT
                char fname_tmp[MAX_PATH];
                size_t copied = wcstombs(fname_tmp, FindFileData.cFileName, MAX_PATH);
                CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1));
                fname = fname_tmp;
#else
                fname = FindFileData.cFileName;
#endif

                if (addPath)
                    list.push_back(path + "/" + String(fname));
                else
                    list.push_back(String(fname));
            }
        }
#ifdef HAVE_WINRT
        while(FindNextFileW(hFind, &FindFileData));
#else
        while(FindNextFileA(hFind, &FindFileData));
#endif
        FindClose(hFind);
    }

#else
    (void)addPath;
    DIR *dp;
    struct dirent *dirp;
    if((dp = opendir(path_f.c_str())) == NULL)
    {
        return list;
    }

    while ((dirp = readdir(dp)) != NULL)
    {
        if (dirp->d_type == DT_DIR &&
            strcmp(dirp->d_name, ".") != 0 &&
            strcmp(dirp->d_name, "..") != 0 )
        {
            if (exten.compare("*") == 0)
                list.push_back(static_cast<String>(dirp->d_name));
            else
                if (String(dirp->d_name).find(exten) != String::npos)
                    list.push_back(static_cast<String>(dirp->d_name));
        }
    }
    closedir(dp);
#endif

    return list;
}

std::vector<String> Directory::GetListFilesR ( const String& path, const String & exten, bool addPath )
{
    std::vector<String> list = Directory::GetListFiles(path, exten, addPath);

    std::vector<String> dirs = Directory::GetListFolders(path, exten, addPath);

    std::vector<String>::const_iterator it;
    for (it = dirs.begin(); it != dirs.end(); ++it)
    {
        std::vector<String> cl = Directory::GetListFiles(*it, exten, addPath);
        list.insert(list.end(), cl.begin(), cl.end());
    }

    return list;
}

}
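For anyone auditing callers of the Directory helpers before they move out, a minimal usage sketch (mine, not part of the diff; it assumes the pre-removal `opencv2/contrib.hpp` header). Note from the code above that `exten` is a wildcard pattern on the Windows branch but a plain substring on POSIX, with "*" meaning "everything" on both, and that the POSIX branch ignores `addPath`:

#include "opencv2/contrib.hpp"
#include <iostream>

int main()
{
    // List the immediate contents of the current directory; on Windows,
    // addPath=true prepends "./" to each name (the POSIX branch above
    // discards the flag and returns bare names).
    std::vector<cv::String> files = cv::Directory::GetListFiles(".", "*", true);
    for (size_t i = 0; i < files.size(); i++)
        std::cout << files[i] << std::endl;
    return 0;
}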
@ -1,651 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The names of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/*******************************************************************************************

The LogPolar Blind Spot Model code has been contributed by Fabio Solari and Manuela Chessa.

More details can be found in:

M. Chessa, S. P. Sabatini, F. Solari and F. Tatti (2011)
A Quantitative Comparison of Speed and Reliability for Log-Polar Mapping Techniques,
Computer Vision Systems - 8th International Conference,
ICVS 2011, Sophia Antipolis, France, September 20-22, 2011
(http://dx.doi.org/10.1007/978-3-642-23968-7_5)

********************************************************************************************/

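For reference, all three mappers below share the same forward geometry; in the notation of `create_map` ($\rho_0$ = `ro0`, $R$ rings, $S$ sectors, $\rho_{\max}$ = `romax`), a pixel at polar coordinates $(\rho, \theta)$ about the fixation center goes to cortical coordinates

$$u = \frac{\ln(\rho/\rho_0)}{\ln a}, \qquad v = q\,\theta, \qquad \text{with} \quad a = \exp\!\left(\frac{\ln(\rho_{\max}/\rho_0)}{R}\right), \quad q = \frac{S}{2\pi},$$

i.e. rings spaced exponentially in radius and uniformly in angle. This is a summary read off the code, not the cited paper's notation.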
#include "precomp.hpp"

#include <cmath>
#include <vector>

namespace cv
{

//------------------------------------interp-------------------------------------------
LogPolar_Interp::LogPolar_Interp(int w, int h, Point2i center, int _R, double _ro0, int _interp, int full, int _s, int sp)
{
    if ( (center.x!=w/2 || center.y!=h/2) && full==0) full=1;

    if (center.x<0) center.x=0;
    if (center.y<0) center.y=0;
    if (center.x>=w) center.x=w-1;
    if (center.y>=h) center.y=h-1;

    if (full){
        int rtmp;

        if (center.x<=w/2 && center.y>=h/2)
            rtmp=(int)std::sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
        else if (center.x>=w/2 && center.y>=h/2)
            rtmp=(int)std::sqrt((float)center.y*center.y + (float)center.x*center.x);
        else if (center.x>=w/2 && center.y<=h/2)
            rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
        else //if (center.x<=w/2 && center.y<=h/2)
            rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));

        M=2*rtmp; N=2*rtmp;

        top = M/2 - center.y;
        bottom = M/2 - (h-center.y);
        left = M/2 - center.x;
        right = M/2 - (w - center.x);

    }else{
        top=bottom=left=right=0;
        M=w; N=h;
    }

    if (sp){
        int jc=M/2-1, ic=N/2-1;
        int _romax=std::min(ic, jc);
        double _a=std::exp(std::log((double)(_romax/2-1)/(double)ro0)/(double)R);
        S=(int) floor(2*CV_PI/(_a-1)+0.5);
    }

    interp=_interp;

    create_map(M, N, _R, _s, _ro0);
}

void LogPolar_Interp::create_map(int _M, int _n, int _R, int _s, double _ro0)
{
    M=_M;
    N=_n;
    R=_R;
    S=_s;
    ro0=_ro0;

    int jc=N/2-1, ic=M/2-1;
    romax=std::min(ic, jc);
    a=std::exp(std::log((double)romax/(double)ro0)/(double)R);
    q=((double)S)/(2*CV_PI);

    Rsri = Mat::zeros(S,R,CV_32FC1);
    Csri = Mat::zeros(S,R,CV_32FC1);
    ETAyx = Mat::zeros(N,M,CV_32FC1);
    CSIyx = Mat::zeros(N,M,CV_32FC1);

    for(int v=0; v<S; v++)
    {
        for(int u=0; u<R; u++)
        {
            Rsri.at<float>(v,u)=(float)(ro0*std::pow(a,u)*sin(v/q)+jc);
            Csri.at<float>(v,u)=(float)(ro0*std::pow(a,u)*cos(v/q)+ic);
        }
    }

    for(int j=0; j<N; j++)
    {
        for(int i=0; i<M; i++)
        {
            double theta;
            if(i>=ic)
                theta=atan((double)(j-jc)/(double)(i-ic));
            else
                theta=atan((double)(j-jc)/(double)(i-ic))+CV_PI;

            if(theta<0)
                theta+=2*CV_PI;

            ETAyx.at<float>(j,i)=(float)(q*theta);

            double ro2=(j-jc)*(j-jc)+(i-ic)*(i-ic);
            CSIyx.at<float>(j,i)=(float)(0.5*std::log(ro2/(ro0*ro0))/std::log(a));
        }
    }
}

const Mat LogPolar_Interp::to_cortical(const Mat &source)
{
    Mat out(S,R,CV_8UC1,Scalar(0));

    Mat source_border;
    copyMakeBorder(source,source_border,top,bottom,left,right,BORDER_CONSTANT,Scalar(0));

    remap(source_border,out,Csri,Rsri,interp);

    return out;
}


const Mat LogPolar_Interp::to_cartesian(const Mat &source)
{
    Mat out(N,M,CV_8UC1,Scalar(0));

    Mat source_border;

    if (interp==INTER_NEAREST || interp==INTER_LINEAR){
        copyMakeBorder(source,source_border,0,1,0,0,BORDER_CONSTANT,Scalar(0));
        Mat rowS0 = source_border.row(S);
        source_border.row(0).copyTo(rowS0);
    } else if (interp==INTER_CUBIC){
        copyMakeBorder(source,source_border,0,2,0,0,BORDER_CONSTANT,Scalar(0));
        Mat rowS0 = source_border.row(S);
        Mat rowS1 = source_border.row(S+1);
        source_border.row(0).copyTo(rowS0);
        source_border.row(1).copyTo(rowS1);
    } else if (interp==INTER_LANCZOS4){
        copyMakeBorder(source,source_border,0,4,0,0,BORDER_CONSTANT,Scalar(0));
        Mat rowS0 = source_border.row(S);
        Mat rowS1 = source_border.row(S+1);
        Mat rowS2 = source_border.row(S+2);
        Mat rowS3 = source_border.row(S+3);
        source_border.row(0).copyTo(rowS0);
        source_border.row(1).copyTo(rowS1);
        source_border.row(2).copyTo(rowS2);
        source_border.row(3).copyTo(rowS3);
    }
    remap(source_border,out,CSIyx,ETAyx,interp);

    Mat out_cropped=out(Range(top,N-1-bottom),Range(left,M-1-right));

    return out_cropped;
}

LogPolar_Interp::~LogPolar_Interp()
{
}

//------------------------------------overlapping----------------------------------

LogPolar_Overlapping::LogPolar_Overlapping(int w, int h, Point2i center, int _R, double _ro0, int full, int _s, int sp)
{
    if ( (center.x!=w/2 || center.y!=h/2) && full==0) full=1;

    if (center.x<0) center.x=0;
    if (center.y<0) center.y=0;
    if (center.x>=w) center.x=w-1;
    if (center.y>=h) center.y=h-1;

    if (full){
        int rtmp;

        if (center.x<=w/2 && center.y>=h/2)
            rtmp=(int)std::sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
        else if (center.x>=w/2 && center.y>=h/2)
            rtmp=(int)std::sqrt((float)center.y*center.y + (float)center.x*center.x);
        else if (center.x>=w/2 && center.y<=h/2)
            rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
        else //if (center.x<=w/2 && center.y<=h/2)
            rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));

        M=2*rtmp; N=2*rtmp;

        top = M/2 - center.y;
        bottom = M/2 - (h-center.y);
        left = M/2 - center.x;
        right = M/2 - (w - center.x);

    }else{
        top=bottom=left=right=0;
        M=w; N=h;
    }


    if (sp){
        int jc=M/2-1, ic=N/2-1;
        int _romax=std::min(ic, jc);
        double _a=std::exp(std::log((double)(_romax/2-1)/(double)ro0)/(double)R);
        S=(int) floor(2*CV_PI/(_a-1)+0.5);
    }

    create_map(M, N, _R, _s, _ro0);
}

void LogPolar_Overlapping::create_map(int _M, int _n, int _R, int _s, double _ro0)
{
    M=_M;
    N=_n;
    R=_R;
    S=_s;
    ro0=_ro0;

    int jc=N/2-1, ic=M/2-1;
    romax=std::min(ic, jc);
    a=std::exp(std::log((double)romax/(double)ro0)/(double)R);
    q=((double)S)/(2*CV_PI);
    ind1=0;

    Rsri=Mat::zeros(S,R,CV_32FC1);
    Csri=Mat::zeros(S,R,CV_32FC1);
    ETAyx=Mat::zeros(N,M,CV_32FC1);
    CSIyx=Mat::zeros(N,M,CV_32FC1);
    Rsr.resize(R*S);
    Csr.resize(R*S);
    Wsr.resize(R);
    w_ker_2D.resize(R*S);

    for(int v=0; v<S; v++)
    {
        for(int u=0; u<R; u++)
        {
            Rsri.at<float>(v,u)=(float)(ro0*std::pow(a,u)*sin(v/q)+jc);
            Csri.at<float>(v,u)=(float)(ro0*std::pow(a,u)*cos(v/q)+ic);
            Rsr[v*R+u]=(int)floor(Rsri.at<float>(v,u));
            Csr[v*R+u]=(int)floor(Csri.at<float>(v,u));
        }
    }

    bool done=false;

    for(int i=0; i<R; i++)
    {
        Wsr[i]=ro0*(a-1)*std::pow(a,i-1);
        if((Wsr[i]>1)&&(done==false))
        {
            ind1=i;
            done = true;
        }
    }

    for(int j=0; j<N; j++)
    {
        for(int i=0; i<M; i++)//mdf
        {
            double theta;
            if(i>=ic)
                theta=atan((double)(j-jc)/(double)(i-ic));
            else
                theta=atan((double)(j-jc)/(double)(i-ic))+CV_PI;

            if(theta<0)
                theta+=2*CV_PI;

            ETAyx.at<float>(j,i)=(float)(q*theta);

            double ro2=(j-jc)*(j-jc)+(i-ic)*(i-ic);
            CSIyx.at<float>(j,i)=(float)(0.5*std::log(ro2/(ro0*ro0))/std::log(a));
        }
    }

    for(int v=0; v<S; v++)
        for(int u=ind1; u<R; u++)
        {
            //double sigma=Wsr[u]/2.0;
            double sigma=Wsr[u]/3.0;//modf
            int w=(int) floor(3*sigma+0.5);
            w_ker_2D[v*R+u].w=w;
            w_ker_2D[v*R+u].weights.resize((2*w+1)*(2*w+1));
            double dx=Csri.at<float>(v,u)-Csr[v*R+u];
            double dy=Rsri.at<float>(v,u)-Rsr[v*R+u];
            double tot=0;
            for(int j=0; j<2*w+1; j++)
                for(int i=0; i<2*w+1; i++)
                {
                    (w_ker_2D[v*R+u].weights)[j*(2*w+1)+i]=std::exp(-(std::pow(i-w-dx, 2)+std::pow(j-w-dy, 2))/(2*sigma*sigma));
                    tot+=(w_ker_2D[v*R+u].weights)[j*(2*w+1)+i];
                }
            for(int j=0; j<(2*w+1); j++)
                for(int i=0; i<(2*w+1); i++)
                    (w_ker_2D[v*R+u].weights)[j*(2*w+1)+i]/=tot;
        }
}

const Mat LogPolar_Overlapping::to_cortical(const Mat &source)
{
    Mat out(S,R,CV_8UC1,Scalar(0));

    Mat source_border;
    copyMakeBorder(source,source_border,top,bottom,left,right,BORDER_CONSTANT,Scalar(0));

    remap(source_border,out,Csri,Rsri,INTER_LINEAR);

    int wm=w_ker_2D[R-1].w;
    std::vector<int> IMG((M+2*wm+1)*(N+2*wm+1), 0);

    for(int j=0; j<N; j++)
        for(int i=0; i<M; i++)
            IMG[(M+2*wm+1)*(j+wm)+i+wm]=source_border.at<uchar>(j,i);

    for(int v=0; v<S; v++)
        for(int u=ind1; u<R; u++)
        {
            int w=w_ker_2D[v*R+u].w;
            double tmp=0;
            for(int rf=0; rf<(2*w+1); rf++)
            {
                for(int cf=0; cf<(2*w+1); cf++)
                {
                    double weight=(w_ker_2D[v*R+u]).weights[rf*(2*w+1)+cf];
                    tmp+=IMG[(M+2*wm+1)*((rf-w)+Rsr[v*R+u]+wm)+((cf-w)+Csr[v*R+u]+wm)]*weight;
                }
            }
            out.at<uchar>(v,u)=(uchar) floor(tmp+0.5);
        }

    return out;
}

const Mat LogPolar_Overlapping::to_cartesian(const Mat &source)
{
    Mat out(N,M,CV_8UC1,Scalar(0));

    Mat source_border;
    copyMakeBorder(source,source_border,0,1,0,0,BORDER_CONSTANT,Scalar(0));
    Mat rowS = source_border.row(S);
    source_border.row(0).copyTo(rowS);
    remap(source_border,out,CSIyx,ETAyx,INTER_LINEAR);

    int wm=w_ker_2D[R-1].w;

    std::vector<double> IMG((N+2*wm+1)*(M+2*wm+1), 0.);
    std::vector<double> NOR((N+2*wm+1)*(M+2*wm+1), 0.);

    for(int v=0; v<S; v++)
        for(int u=ind1; u<R; u++)
        {
            int w=w_ker_2D[v*R+u].w;
            for(int j=0; j<(2*w+1); j++)
            {
                for(int i=0; i<(2*w+1); i++)
                {
                    int ind=(M+2*wm+1)*((j-w)+Rsr[v*R+u]+wm)+(i-w)+Csr[v*R+u]+wm;
                    IMG[ind]+=((w_ker_2D[v*R+u]).weights[j*(2*w+1)+i])*source.at<uchar>(v, u);
                    NOR[ind]+=((w_ker_2D[v*R+u]).weights[j*(2*w+1)+i]);
                }
            }
        }

    for(int i=0; i<((N+2*wm+1)*(M+2*wm+1)); i++)
        IMG[i]/=NOR[i];

    //int xc=M/2-1, yc=N/2-1;

    for(int j=wm; j<N+wm; j++)
        for(int i=wm; i<M+wm; i++)
        {
            /*if(NOR[(M+2*wm+1)*j+i]>0)
                ret[M*(j-wm)+i-wm]=(int) floor(IMG[(M+2*wm+1)*j+i]+0.5);*/
            //int ro=(int)floor(std::sqrt((double)((j-wm-yc)*(j-wm-yc)+(i-wm-xc)*(i-wm-xc))));
            int csi=(int) floor(CSIyx.at<float>(j-wm,i-wm));

            if((csi>=(ind1-(w_ker_2D[ind1]).w))&&(csi<R))
                out.at<uchar>(j-wm,i-wm)=(uchar) floor(IMG[(M+2*wm+1)*j+i]+0.5);
        }

    Mat out_cropped=out(Range(top,N-1-bottom),Range(left,M-1-right));
    return out_cropped;
}

LogPolar_Overlapping::~LogPolar_Overlapping()
{
}

//----------------------------------------adjacent---------------------------------------

LogPolar_Adjacent::LogPolar_Adjacent(int w, int h, Point2i center, int _R, double _ro0, double smin, int full, int _s, int sp)
{
    if ( (center.x!=w/2 || center.y!=h/2) && full==0) full=1;

    if (center.x<0) center.x=0;
    if (center.y<0) center.y=0;
    if (center.x>=w) center.x=w-1;
    if (center.y>=h) center.y=h-1;

    if (full){
        int rtmp;

        if (center.x<=w/2 && center.y>=h/2)
            rtmp=(int)std::sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
        else if (center.x>=w/2 && center.y>=h/2)
            rtmp=(int)std::sqrt((float)center.y*center.y + (float)center.x*center.x);
        else if (center.x>=w/2 && center.y<=h/2)
            rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
        else //if (center.x<=w/2 && center.y<=h/2)
            rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));

        M=2*rtmp; N=2*rtmp;

        top = M/2 - center.y;
        bottom = M/2 - (h-center.y);
        left = M/2 - center.x;
        right = M/2 - (w - center.x);

    }else{
        top=bottom=left=right=0;
        M=w; N=h;
    }

    if (sp){
        int jc=M/2-1, ic=N/2-1;
        int _romax=std::min(ic, jc);
        double _a=std::exp(std::log((double)(_romax/2-1)/(double)ro0)/(double)R);
        S=(int) floor(2*CV_PI/(_a-1)+0.5);
    }

    create_map(M, N, _R, _s, _ro0, smin);
}


void LogPolar_Adjacent::create_map(int _M, int _n, int _R, int _s, double _ro0, double smin)
{
    M=_M;
    N=_n;
    R=_R;
    S=_s;
    ro0=_ro0;
    romax=std::min(M/2.0, N/2.0);

    a=std::exp(std::log(romax/ro0)/(double)R);
    q=S/(2*CV_PI);

    A.resize(R*S);
    L.resize(M*N);

    for(int i=0; i<R*S; i++)
        A[i]=0;

    double xc=M/2.0, yc=N/2.0;

    for(int j=0; j<N; j++)
        for(int i=0; i<M; i++)
        {
            double x=i+0.5-xc, y=j+0.5-yc;
            subdivide_recursively(x, y, i, j, 1, smin);
        }
}


void LogPolar_Adjacent::subdivide_recursively(double x, double y, int i, int j, double length, double smin)
{
    if(length<=smin)
    {
        int u, v;
        if(get_uv(x, y, u, v))
        {
            pixel p;
            p.u=u;
            p.v=v;
            p.a=length*length;
            L[M*j+i].push_back(p);
            A[v*R+u]+=length*length;
        }
    }

    if(length>smin)
    {
        double xs[4], ys[4];
        int us[4], vs[4];

        xs[0]=xs[3]=x+length/4.0;
        xs[1]=xs[2]=x-length/4.0;
        ys[1]=ys[0]=y+length/4.0;
        ys[2]=ys[3]=y-length/4.0;

        for(int z=0; z<4; z++)
            get_uv(xs[z], ys[z], us[z], vs[z]);

        bool c=true;
        for(int w=1; w<4; w++)
        {
            if(us[w]!=us[w-1])
                c=false;
            if(vs[w]!=vs[w-1])
                c=false;
        }

        if(c)
        {
            if(us[0]!=-1)
            {
                pixel p;
                p.u=us[0];
                p.v=vs[0];
                p.a=length*length;
                L[M*j+i].push_back(p);
                A[vs[0]*R+us[0]]+=length*length;
            }
        }

        else
        {
            for(int z=0; z<4; z++)
                if(us[z]!=-1)
                    subdivide_recursively(xs[z], ys[z], i, j, length/2.0, smin);
        }
    }
}


const Mat LogPolar_Adjacent::to_cortical(const Mat &source)
{
    Mat source_border;
    copyMakeBorder(source,source_border,top,bottom,left,right,BORDER_CONSTANT,Scalar(0));

    std::vector<double> map(R*S, 0.);

    for(int j=0; j<N; j++)
        for(int i=0; i<M; i++)
        {
            for(size_t z=0; z<(L[M*j+i]).size(); z++)
            {
                map[R*((L[M*j+i])[z].v)+((L[M*j+i])[z].u)]+=((L[M*j+i])[z].a)*(source_border.at<uchar>(j,i));
            }
        }

    for(int i=0; i<R*S; i++)
        map[i]/=A[i];

    Mat out(S,R,CV_8UC1,Scalar(0));

    for(int i=0; i<S; i++)
        for(int j=0;j<R;j++)
            out.at<uchar>(i,j)=(uchar) floor(map[i*R+j]+0.5);

    return out;
}

const Mat LogPolar_Adjacent::to_cartesian(const Mat &source)
{
    std::vector<double> map(M*N, 0.);

    for(int j=0; j<N; j++)
        for(int i=0; i<M; i++)
        {
            for(size_t z=0; z<(L[M*j+i]).size(); z++)
            {
                map[M*j+i]+=(L[M*j+i])[z].a*source.at<uchar>((L[M*j+i])[z].v,(L[M*j+i])[z].u);
            }
        }

    Mat out(N,M,CV_8UC1,Scalar(0));

    for(int i=0; i<N; i++)
        for(int j=0; j<M; j++)
            out.at<uchar>(i,j)=(uchar) floor(map[i*M+j]+0.5);

    Mat out_cropped=out(Range(top,N-1-bottom),Range(left,M-1-right));
    return out_cropped;
}


bool LogPolar_Adjacent::get_uv(double x, double y, int&u, int&v)
{
    double ro=std::sqrt(x*x+y*y), theta;
    if(x>0)
        theta=atan(y/x);
    else
        theta=atan(y/x)+CV_PI;

    if(ro<ro0||ro>romax)
    {
        u=-1;
        v=-1;
        return false;
    }
    else
    {
        u= (int) floor(std::log(ro/ro0)/std::log(a));
        if(theta>=0)
            v= (int) floor(q*theta);
        else
            v= (int) floor(q*(theta+2*CV_PI));
        return true;
    }
}

LogPolar_Adjacent::~LogPolar_Adjacent()
{
}

}
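To close, a minimal usage sketch for the interpolation-based mapper (mine; the constructor signature is taken from the code above, but the numeric arguments are purely illustrative and the class is gone after this PR):

#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // 8-bit single channel
    if (img.empty()) return 1;

    // w, h, center, R rings, ro0, interpolation, full, S sectors, sp
    cv::LogPolar_Interp lp(img.cols, img.rows,
                           cv::Point2i(img.cols/2, img.rows/2),
                           70, 3.0, cv::INTER_LINEAR, 0, 117, 0);

    cv::Mat cortical  = lp.to_cortical(img);       // S x R log-polar image
    cv::Mat cartesian = lp.to_cartesian(cortical); // back to image space
    return 0;
}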