Move legacy part of opencv_contrib to separate header
This commit is contained in:
parent 3b364330ad
commit 909d6fcf51
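The commit moves the legacy C++ classes of the contrib module (CvAdaptiveSkinDetector, CvFuzzyMeanShiftTracker, cv::LevMarqSparse and friends) out of opencv2/contrib.hpp into a new compatibility header, opencv2/contrib/compat.hpp, and switches the sources and samples that still use them to the new include and to the C++ error-handling and constant names. As a rough sketch (a hypothetical downstream file, not part of this commit), code that relies on these classes now needs the explicit compat include:

    // sketch.cpp (hypothetical): using the legacy contrib classes after this commit
    #include "opencv2/contrib/compat.hpp"  // CvAdaptiveSkinDetector, CvFuzzyMeanShiftTracker, cv::LevMarqSparse

    int main()
    {
        CvAdaptiveSkinDetector detector(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
        (void)detector;  // declarations only; the full interface is in the header added below
        return 0;
    }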
@@ -51,11 +51,12 @@
#include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking_c.h"
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/contrib/compat.hpp"

#include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include "opencv2/legacy/blobtrack.hpp"

#include "opencv2/contrib.hpp"

#endif

File diff suppressed because it is too large (Load Diff)

modules/contrib/include/opencv2/contrib/compat.hpp (new file, 384 lines)
@@ -0,0 +1,384 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_CONTRIB_COMPAT_HPP__
#define __OPENCV_CONTRIB_COMPAT_HPP__

#include "opencv2/core/core_c.h"

#ifdef __cplusplus

/****************************************************************************************\
*                                  Adaptive Skin Detector                                *
\****************************************************************************************/

class CV_EXPORTS CvAdaptiveSkinDetector
{
private:
    enum {
        GSD_HUE_LT = 3,
        GSD_HUE_UT = 33,
        GSD_INTENSITY_LT = 15,
        GSD_INTENSITY_UT = 250
    };

    class CV_EXPORTS Histogram
    {
    private:
        enum {
            HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
        };

    protected:
        int findCoverageIndex(double surfaceToCover, int defaultValue = 0);

    public:
        CvHistogram *fHistogram;
        Histogram();
        virtual ~Histogram();

        void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
        void mergeWith(Histogram *source, double weight);
    };

    int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
    double fHistogramMergeFactor, fHuePercentCovered;
    Histogram histogramHueMotion, skinHueHistogram;
    IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
    IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;

protected:
    void initData(IplImage *src, int widthDivider, int heightDivider);
    void adaptiveFilter();

public:

    enum {
        MORPHING_METHOD_NONE = 0,
        MORPHING_METHOD_ERODE = 1,
        MORPHING_METHOD_ERODE_ERODE = 2,
        MORPHING_METHOD_ERODE_DILATE = 3
    };

    CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
    virtual ~CvAdaptiveSkinDetector();

    virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
};
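The skin detector keeps its old IplImage-based interface. A minimal usage sketch, assuming frames come from the legacy highgui C API (the image I/O calls are an assumption of this sketch, not part of this header):

    // Hypothetical usage of the declaration above.
    #include "opencv2/contrib/compat.hpp"
    #include "opencv2/highgui/highgui_c.h"   // cvLoadImage, CV_LOAD_IMAGE_COLOR

    void detectSkin(const char* path)
    {
        IplImage* frame = cvLoadImage(path, CV_LOAD_IMAGE_COLOR);             // BGR input
        if (!frame) return;
        IplImage* hueMask = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1); // single-channel output

        CvAdaptiveSkinDetector detector(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
        detector.process(frame, hueMask);  // fills hueMask with the detected skin hue region

        cvReleaseImage(&hueMask);
        cvReleaseImage(&frame);
    }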

/****************************************************************************************\
*                                  Fuzzy MeanShift Tracker                               *
\****************************************************************************************/

class CV_EXPORTS CvFuzzyPoint {
public:
    double x, y, value;

    CvFuzzyPoint(double _x, double _y);
};

class CV_EXPORTS CvFuzzyCurve {
private:
    std::vector<CvFuzzyPoint> points;
    double value, centre;

    bool between(double x, double x1, double x2);

public:
    CvFuzzyCurve();
    ~CvFuzzyCurve();

    void setCentre(double _centre);
    double getCentre();
    void clear();
    void addPoint(double x, double y);
    double calcValue(double param);
    double getValue();
    void setValue(double _value);
};

class CV_EXPORTS CvFuzzyFunction {
public:
    std::vector<CvFuzzyCurve> curves;

    CvFuzzyFunction();
    ~CvFuzzyFunction();
    void addCurve(CvFuzzyCurve *curve, double value = 0);
    void resetValues();
    double calcValue();
    CvFuzzyCurve *newCurve();
};

class CV_EXPORTS CvFuzzyRule {
private:
    CvFuzzyCurve *fuzzyInput1, *fuzzyInput2;
    CvFuzzyCurve *fuzzyOutput;
public:
    CvFuzzyRule();
    ~CvFuzzyRule();
    void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
    double calcValue(double param1, double param2);
    CvFuzzyCurve *getOutputCurve();
};

class CV_EXPORTS CvFuzzyController {
private:
    std::vector<CvFuzzyRule*> rules;
public:
    CvFuzzyController();
    ~CvFuzzyController();
    void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
    double calcOutput(double param1, double param2);
};

class CV_EXPORTS CvFuzzyMeanShiftTracker
{
private:
    class FuzzyResizer
    {
    private:
        CvFuzzyFunction iInput, iOutput;
        CvFuzzyController fuzzyController;
    public:
        FuzzyResizer();
        int calcOutput(double edgeDensity, double density);
    };

    class SearchWindow
    {
    public:
        FuzzyResizer *fuzzyResizer;
        int x, y;
        int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth;
        int ldx, ldy, ldw, ldh, numShifts, numIters;
        int xGc, yGc;
        long m00, m01, m10, m11, m02, m20;
        double ellipseAngle;
        double density;
        unsigned int depthLow, depthHigh;
        int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;

        SearchWindow();
        ~SearchWindow();
        void setSize(int _x, int _y, int _width, int _height);
        void initDepthValues(IplImage *maskImage, IplImage *depthMap);
        bool shift();
        void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth);
        void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
        void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
        void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
        bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
    };

public:
    enum TrackingState
    {
        tsNone      = 0,
        tsSearching = 1,
        tsTracking  = 2,
        tsSetWindow = 3,
        tsDisabled  = 10
    };

    enum ResizeMethod {
        rmEdgeDensityLinear = 0,
        rmEdgeDensityFuzzy  = 1,
        rmInnerDensity      = 2
    };

    enum {
        MinKernelMass = 1000
    };

    SearchWindow kernel;
    int searchMode;

private:
    enum
    {
        MaxMeanShiftIteration = 5,
        MaxSetSizeIteration   = 5
    };

    void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);

public:
    CvFuzzyMeanShiftTracker();
    ~CvFuzzyMeanShiftTracker();

    void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
};
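The tracker is likewise driven through IplImage pointers and exposes its current search window through the public kernel member. A minimal per-frame sketch under those assumptions (mask and depth images are supplied by the caller):

    // Hypothetical helper, not part of this commit: one tracking step per frame.
    #include "opencv2/contrib/compat.hpp"

    void trackOneFrame(CvFuzzyMeanShiftTracker& tracker,
                       IplImage* maskImage, IplImage* depthMap, bool resetSearch)
    {
        tracker.track(maskImage, depthMap,
                      CvFuzzyMeanShiftTracker::rmEdgeDensityFuzzy, resetSearch);
        // The updated search window is read back from the public kernel member.
        CvRect window = cvRect(tracker.kernel.x, tracker.kernel.y,
                               tracker.kernel.width, tracker.kernel.height);
        (void)window;
    }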

namespace cv
{

typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);

class CV_EXPORTS LevMarqSparse {
public:
    LevMarqSparse();
    LevMarqSparse(int npoints,         // number of points
                  int ncameras,        // number of cameras
                  int nPointParams,    // number of params per one point (3 in case of 3D points)
                  int nCameraParams,   // number of parameters per one camera
                  int nErrParams,      // number of parameters in measurement vector
                                       // for 1 point at one camera (2 in case of 2D projections)
                  Mat& visibility,     // visibility matrix. rows correspond to points, columns correspond to cameras
                                       // 1 - point is visible for the camera, 0 - invisible
                  Mat& P0,             // starting vector of parameters, first cameras then points
                  Mat& X,              // measurements, in order of visibility. non visible cases are skipped
                  TermCriteria criteria, // termination criteria

                  // callback for estimation of Jacobian matrices
                  void (*fjac)(int i, int j, Mat& point_params,
                               Mat& cam_params, Mat& A, Mat& B, void* data),
                  // callback for estimation of backprojection errors
                  void (*func)(int i, int j, Mat& point_params,
                               Mat& cam_params, Mat& estim, void* data),
                  void* data,          // user-specific data passed to the callbacks
                  BundleAdjustCallback cb, void* user_data
                  );

    virtual ~LevMarqSparse();

    virtual void run( int npoints,        // number of points
                      int ncameras,       // number of cameras
                      int nPointParams,   // number of params per one point (3 in case of 3D points)
                      int nCameraParams,  // number of parameters per one camera
                      int nErrParams,     // number of parameters in measurement vector
                                          // for 1 point at one camera (2 in case of 2D projections)
                      Mat& visibility,    // visibility matrix. rows correspond to points, columns correspond to cameras
                                          // 1 - point is visible for the camera, 0 - invisible
                      Mat& P0,            // starting vector of parameters, first cameras then points
                      Mat& X,             // measurements, in order of visibility. non visible cases are skipped
                      TermCriteria criteria, // termination criteria

                      // callback for estimation of Jacobian matrices
                      void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
                                             Mat& cam_params, Mat& A, Mat& B, void* data),
                      // callback for estimation of backprojection errors
                      void (CV_CDECL * func)(int i, int j, Mat& point_params,
                                             Mat& cam_params, Mat& estim, void* data),
                      void* data          // user-specific data passed to the callbacks
                      );

    virtual void clear();

    // useful function to do simple bundle adjustment tasks
    static void bundleAdjust(std::vector<Point3d>& points, // positions of points in global coordinate system (input and output)
                             const std::vector<std::vector<Point2d> >& imagePoints, // projections of 3d points for every camera
                             const std::vector<std::vector<int> >& visibility, // visibility of 3d points for every camera
                             std::vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
                             std::vector<Mat>& R, // rotation matrices of all cameras (input and output)
                             std::vector<Mat>& T, // translation vector of all cameras (input and output)
                             std::vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
                             const TermCriteria& criteria=
                             TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
                             BundleAdjustCallback cb = 0, void* user_data = 0);

public:
    virtual void optimize(CvMat &_vis); //main function that runs minimization

    //iteratively asks for measurement for visible camera-point pairs
    void ask_for_proj(CvMat &_vis,bool once=false);
    //iteratively asks for Jacobians for every camera_point pair
    void ask_for_projac(CvMat &_vis);

    CvMat* err; //error X-hX
    double prevErrNorm, errNorm;
    double lambda;
    CvTermCriteria criteria;
    int iters;

    CvMat** U; //size of array is equal to number of cameras
    CvMat** V; //size of array is equal to number of points
    CvMat** inv_V_star; //inverse of V*

    CvMat** A;
    CvMat** B;
    CvMat** W;

    CvMat* X; //measurement
    CvMat* hX; //current measurement extimation given new parameter vector

    CvMat* prevP; //current already accepted parameter.
    CvMat* P; // parameters used to evaluate function with new params
              // this parameters may be rejected

    CvMat* deltaP; //computed increase of parameters (result of normal system solution )

    CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation
                // length of array is j = number of cameras
    CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation
                // length of array is i = number of points

    CvMat** Yj; //length of array is i = num_points

    CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params

    CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation

    CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j

    int num_cams;
    int num_points;
    int num_err_param;
    int num_cam_param;
    int num_point_param;

    //target function and jacobian pointers, which needs to be initialized
    void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
    void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);

    void* data;

    BundleAdjustCallback cb;
    void* user_data;
};

} // cv

#endif /* __cplusplus */

#endif /* __OPENCV_CONTRIB_COMPAT_HPP__ */
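For the sparse Levenberg-Marquardt class, the simplest entry point is the static bundleAdjust declared above. A sketch of a call, assuming the caller has already filled consistent containers (visibility[c][p] is 1 when camera c sees point p, and imagePoints[c][p] holds that projection):

    // Hypothetical wrapper around the declaration above, not part of this commit.
    #include "opencv2/contrib/compat.hpp"
    #include <cfloat>   // DBL_EPSILON
    #include <vector>

    void refineReconstruction(std::vector<cv::Point3d>& points,
                              const std::vector<std::vector<cv::Point2d> >& imagePoints,
                              const std::vector<std::vector<int> >& visibility,
                              std::vector<cv::Mat>& cameraMatrix,
                              std::vector<cv::Mat>& R,
                              std::vector<cv::Mat>& T,
                              std::vector<cv::Mat>& distCoeffs)
    {
        cv::LevMarqSparse::bundleAdjust(points, imagePoints, visibility,
                                        cameraMatrix, R, T, distCoeffs,
                                        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                                         30, DBL_EPSILON));
    }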
@@ -36,6 +36,7 @@

#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/contrib/compat.hpp"

#define ASD_INTENSITY_SET_PIXEL(pointer, qq) {(*pointer) = (unsigned char)qq;}


@@ -41,6 +41,7 @@

#include "precomp.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/contrib/compat.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>


@@ -142,7 +142,7 @@ private:
    LocationScaleImageRange(const std::vector<Point>& locations, const std::vector<float>& _scales) :
        locations_(locations), scales_(_scales)
    {
        assert(locations.size()==_scales.size());
        CV_Assert(locations.size()==_scales.size());
    }

    ImageIterator* iterator() const

@@ -393,7 +393,7 @@ private:
    LocationScaleImageIterator(const std::vector<Point>& locations, const std::vector<float>& _scales) :
        locations_(locations), scales_(_scales)
    {
        assert(locations.size()==_scales.size());
        CV_Assert(locations.size()==_scales.size());
        reset();
    }

@@ -622,7 +622,7 @@ void ChamferMatcher::Matching::followContour(Mat& templ_img, template_coords_t&
    coordinate_t next;
    unsigned char ptr;

    assert (direction==-1 || !coords.empty());
    CV_Assert (direction==-1 || !coords.empty());

    coordinate_t crt = coords.back();

@@ -903,18 +903,18 @@ void ChamferMatcher::Template::show() const
            p2.x = x + pad*(int)(sin(orientations[i])*100)/100;
            p2.y = y + pad*(int)(cos(orientations[i])*100)/100;

            line(templ_color, p1,p2, CV_RGB(255,0,0));
            line(templ_color, p1,p2, Scalar(255,0,0));
        }
    }

    circle(templ_color,Point(center.x + pad, center.y + pad),1,CV_RGB(0,255,0));
    circle(templ_color,Point(center.x + pad, center.y + pad),1,Scalar(0,255,0));

#ifdef HAVE_OPENCV_HIGHGUI
    namedWindow("templ",1);
    imshow("templ",templ_color);
    waitKey();
#else
    CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without GUI support");
    CV_Error(Error::StsNotImplemented, "OpenCV has been compiled without GUI support");
#endif

    templ_color.release();

@@ -1059,7 +1059,7 @@ void ChamferMatcher::Matching::fillNonContourOrientations(Mat& annotated_img, Ma
    int cols = annotated_img.cols;
    int rows = annotated_img.rows;

    assert(orientation_img.cols==cols && orientation_img.rows==rows);
    CV_Assert(orientation_img.cols==cols && orientation_img.rows==rows);

    for (int y=0;y<rows;++y) {
        for (int x=0;x<cols;++x) {

@@ -1279,7 +1279,7 @@ void ChamferMatcher::showMatch(Mat& img, int index)
        std::cout << "Index too big.\n" << std::endl;
    }

    assert(img.channels()==3);
    CV_Assert(img.channels()==3);

    Match match = matches[index];

@@ -1298,7 +1298,7 @@ void ChamferMatcher::showMatch(Mat& img, Match match)

void ChamferMatcher::showMatch(Mat& img, Match match)
{
    assert(img.channels()==3);
    CV_Assert(img.channels()==3);

    const template_coords_t& templ_coords = match.tpl->coords;
    for (size_t i=0;i<templ_coords.size();++i) {

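The hunks above and below all follow the same mechanical pattern: plain assert calls become CV_Assert, and the C-style CV_Sts* status codes passed to CV_Error become members of the cv::Error enum. A generic before/after sketch (not a hunk from this commit):

    #include "opencv2/core.hpp"

    void checkInput(const cv::Mat& img)
    {
        // was: assert(img.channels() == 3);
        CV_Assert(img.channels() == 3);   // throws cv::Exception instead of aborting
        if (img.empty())
            // was: CV_Error(CV_StsBadArg, "empty input image");
            CV_Error(cv::Error::StsBadArg, "empty input image");
    }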
@@ -40,7 +40,7 @@ static Mat linspace(float x0, float x1, int n)
static void sortMatrixRowsByIndices(InputArray _src, InputArray _indices, OutputArray _dst)
{
    if(_indices.getMat().type() != CV_32SC1)
        CV_Error(CV_StsUnsupportedFormat, "cv::sortRowsByIndices only works on integer indices!");
        CV_Error(Error::StsUnsupportedFormat, "cv::sortRowsByIndices only works on integer indices!");
    Mat src = _src.getMat();
    std::vector<int> indices = _indices.getMat();
    _dst.create(src.rows, src.cols, src.type());

@@ -64,8 +64,8 @@ static Mat argsort(InputArray _src, bool ascending=true)
{
    Mat src = _src.getMat();
    if (src.rows != 1 && src.cols != 1)
        CV_Error(CV_StsBadArg, "cv::argsort only sorts 1D matrices.");
    int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
        CV_Error(Error::StsBadArg, "cv::argsort only sorts 1D matrices.");
    int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING);
    Mat sorted_indices;
    sortIdx(src.reshape(1,1),sorted_indices,flags);
    return sorted_indices;

@@ -116,8 +116,8 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
    Mat Y = _Y.getMat();
    Mat xi = _xi.getMat();
    // check types & alignment
    assert((x.type() == Y.type()) && (Y.type() == xi.type()));
    assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols));
    CV_Assert((x.type() == Y.type()) && (Y.type() == xi.type()));
    CV_Assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols));
    // call templated interp1
    switch(x.type()) {
        case CV_8SC1: return interp1_<char>(x,Y,xi); break;

@@ -127,7 +127,7 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
        case CV_32SC1: return interp1_<int>(x,Y,xi); break;
        case CV_32FC1: return interp1_<float>(x,Y,xi); break;
        case CV_64FC1: return interp1_<double>(x,Y,xi); break;
        default: CV_Error(CV_StsUnsupportedFormat, ""); break;
        default: CV_Error(Error::StsUnsupportedFormat, ""); break;
    }
    return Mat();
}

@@ -473,7 +473,7 @@ namespace colormap
    void ColorMap::operator()(InputArray _src, OutputArray _dst) const
    {
        if(_lut.total() != 256)
            CV_Error(CV_StsAssert, "cv::LUT only supports tables of size 256.");
            CV_Error(Error::StsAssert, "cv::LUT only supports tables of size 256.");
        Mat src = _src.getMat();
        // Return original matrix if wrong type is given (is fail loud better here?)
        if(src.type() != CV_8UC1 && src.type() != CV_8UC3)

@@ -521,7 +521,7 @@ namespace colormap
            colormap == COLORMAP_WINTER ? (colormap::ColorMap*)(new colormap::Winter) : 0;

        if( !cm )
            CV_Error( CV_StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
            CV_Error( Error::StsBadArg, "Unknown colormap id; use one of COLORMAP_*");

        (*cm)(src, dst);


@@ -51,7 +51,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
    // make sure the input data is a vector of matrices or vector of vector
    if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
        String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // number of samples
    size_t n = src.total();

@@ -67,7 +67,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
        // make sure data can be reshaped, throw exception if not!
        if(src.getMat(i).total() != d) {
            String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, d, src.getMat(i).total());
            CV_Error(CV_StsBadArg, error_message);
            CV_Error(Error::StsBadArg, error_message);
        }
        // get a hold of the current row
        Mat xi = data.row(i);

@@ -306,13 +306,13 @@ void FaceRecognizer::update(InputArrayOfArrays src, InputArray labels ) {
    }

    String error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str());
    CV_Error(CV_StsNotImplemented, error_msg);
    CV_Error(Error::StsNotImplemented, error_msg);
}

void FaceRecognizer::save(const String& filename) const {
    FileStorage fs(filename, FileStorage::WRITE);
    if (!fs.isOpened())
        CV_Error(CV_StsError, "File can't be opened for writing!");
        CV_Error(Error::StsError, "File can't be opened for writing!");
    this->save(fs);
    fs.release();
}

@@ -320,7 +320,7 @@ void FaceRecognizer::save(const String& filename) const {
void FaceRecognizer::load(const String& filename) {
    FileStorage fs(filename, FileStorage::READ);
    if (!fs.isOpened())
        CV_Error(CV_StsError, "File can't be opened for writing!");
        CV_Error(Error::StsError, "File can't be opened for writing!");
    this->load(fs);
    fs.release();
}

@@ -331,17 +331,17 @@ void FaceRecognizer::load(const String& filename) {
void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
    if(_src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    } else if(_local_labels.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _local_labels.type());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure data has correct size
    if(_src.total() > 1) {
        for(int i = 1; i < static_cast<int>(_src.total()); i++) {
            if(_src.getMat(i-1).total() != _src.getMat(i).total()) {
                String error_message = format("In the Eigenfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", _src.getMat(i-1).total(), _src.getMat(i).total());
                CV_Error(CV_StsUnsupportedFormat, error_message);
                CV_Error(Error::StsUnsupportedFormat, error_message);
            }
        }
    }

@@ -355,7 +355,7 @@ void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
    // assert there are as much samples as labels
    if(static_cast<int>(labels.total()) != n) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", n, labels.total());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // clear existing model data
    _labels.release();

@@ -365,7 +365,7 @@ void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
        _num_components = n;

    // perform the PCA
    PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, _num_components);
    PCA pca(data, Mat(), PCA::DATA_AS_ROW, _num_components);
    // copy the PCA results
    _mean = pca.mean.reshape(1,1); // store the mean vector
    _eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row

@@ -386,11 +386,11 @@ void Eigenfaces::predict(InputArray _src, int &minClass, double &minDist) const
    if(_projections.empty()) {
        // throw error if no data (or simply return -1?)
        String error_message = "This Eigenfaces model is not computed yet. Did you call Eigenfaces::train?";
        CV_Error(CV_StsError, error_message);
        CV_Error(Error::StsError, error_message);
    } else if(_eigenvectors.rows != static_cast<int>(src.total())) {
        // check data alignment just for clearer exception messages
        String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // project into PCA subspace
    Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));

@@ -440,17 +440,17 @@ void Eigenfaces::save(FileStorage& fs) const {
void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
    if(src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    } else if(_lbls.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure data has correct size
    if(src.total() > 1) {
        for(int i = 1; i < static_cast<int>(src.total()); i++) {
            if(src.getMat(i-1).total() != src.getMat(i).total()) {
                String error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", src.getMat(i-1).total(), src.getMat(i).total());
                CV_Error(CV_StsUnsupportedFormat, error_message);
                CV_Error(Error::StsUnsupportedFormat, error_message);
            }
        }
    }

@@ -462,10 +462,10 @@ void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
    // make sure labels are passed in correct shape
    if(labels.total() != (size_t) N) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", N, labels.total());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    } else if(labels.rows != 1 && labels.cols != 1) {
        String error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%s, cols=%d.", labels.rows, labels.cols);
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // clear existing model data
    _labels.release();

@@ -481,7 +481,7 @@ void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
    if((_num_components <= 0) || (_num_components > (C-1)))
        _num_components = (C-1);
    // perform a PCA and keep (N-C) components
    PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, (N-C));
    PCA pca(data, Mat(), PCA::DATA_AS_ROW, (N-C));
    // project the data and perform a LDA on it
    LDA lda(pca.project(data),labels, _num_components);
    // store the total mean vector

@@ -506,10 +506,10 @@ void Fisherfaces::predict(InputArray _src, int &minClass, double &minDist) const
    if(_projections.empty()) {
        // throw error if no data (or simply return -1?)
        String error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?";
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    } else if(src.total() != (size_t) _eigenvectors.rows) {
        String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // project into LDA subspace
    Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));

@@ -641,7 +641,7 @@ static void elbp(InputArray src, OutputArray dst, int radius, int neighbors)
    case CV_64FC1: elbp_<double>(src,dst, radius, neighbors); break;
    default:
        String error_msg = format("Using Original Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type);
        CV_Error(CV_StsNotImplemented, error_msg);
        CV_Error(Error::StsNotImplemented, error_msg);
        break;
    }
}

@@ -687,7 +687,7 @@ static Mat histc(InputArray _src, int minVal, int maxVal, bool normed)
        return histc_(src, minVal, maxVal, normed);
        break;
    default:
        CV_Error(CV_StsUnmatchedFormats, "This type is not implemented yet."); break;
        CV_Error(Error::StsUnmatchedFormats, "This type is not implemented yet."); break;
    }
    return Mat();
}

@@ -769,14 +769,14 @@ void LBPH::update(InputArrayOfArrays _in_src, InputArray _in_labels) {
void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserveData) {
    if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) {
        String error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    if(_in_src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(CV_StsUnsupportedFormat, error_message);
        CV_Error(Error::StsUnsupportedFormat, error_message);
    } else if(_in_labels.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
        CV_Error(CV_StsUnsupportedFormat, error_message);
        CV_Error(Error::StsUnsupportedFormat, error_message);
    }
    // get the vector of matrices
    std::vector<Mat> src;

@@ -786,7 +786,7 @@ void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserv
    // check if data is well- aligned
    if(labels.total() != src.size()) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", src.size(), _labels.total());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // if this model should be trained without preserving old data, delete old model data
    if(!preserveData) {

@@ -817,7 +817,7 @@ void LBPH::predict(InputArray _src, int &minClass, double &minDist) const {
    if(_histograms.empty()) {
        // throw error if no data (or simply return -1?)
        String error_message = "This LBPH model is not computed yet. Did you call the train method?";
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    Mat src = _src.getMat();
    // get the spatial histogram from input image

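Alongside the error-handling change, the hunks above also swap the C-style numeric constants for their scoped C++ counterparts: CV_SORT_* becomes cv::SORT_*, CV_PCA_DATA_AS_ROW becomes cv::PCA::DATA_AS_ROW, CV_REDUCE_MIN becomes cv::REDUCE_MIN and CV_STORAGE_READ becomes cv::FileStorage::READ. A generic sketch of the new spelling (not a hunk from this commit):

    #include "opencv2/core.hpp"

    void sortAndProject(const cv::Mat& data)
    {
        // was: int flags = CV_SORT_EVERY_ROW + (true ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
        int flags = cv::SORT_EVERY_ROW | cv::SORT_ASCENDING;
        cv::Mat sortedIdx;
        cv::sortIdx(data.reshape(1, 1), sortedIdx, flags);

        // was: cv::PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW, 10);
        cv::PCA pca(data, cv::Mat(), cv::PCA::DATA_AS_ROW, 10);
    }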
@@ -35,6 +35,7 @@
//M*/

#include "precomp.hpp"
#include "opencv2/contrib/compat.hpp"

CvFuzzyPoint::CvFuzzyPoint(double _x, double _y)
{

@@ -85,7 +85,7 @@ static void downsamplePoints( const Mat& src, Mat& dst, size_t count )
        candidatePointsMask.at<uchar>(0, maxLoc.x) = 0;

        Mat minDists;
        reduce( activedDists, minDists, 0, CV_REDUCE_MIN );
        reduce( activedDists, minDists, 0, REDUCE_MIN );
        minMaxLoc( minDists, 0, &maxVal, 0, &maxLoc, candidatePointsMask );
        dst.at<Point3_<uchar> >((int)i) = src.at<Point3_<uchar> >(maxLoc.x);
    }

@@ -43,9 +43,9 @@ static Mat argsort(InputArray _src, bool ascending=true)
    Mat src = _src.getMat();
    if (src.rows != 1 && src.cols != 1) {
        String error_message = "Wrong shape of input matrix! Expected a matrix with one row or column.";
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
    int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING);
    Mat sorted_indices;
    sortIdx(src.reshape(1,1),sorted_indices,flags);
    return sorted_indices;

@@ -55,7 +55,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
    // make sure the input data is a vector of matrices or vector of vector
    if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
        String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // number of samples
    size_t n = src.total();

@@ -71,7 +71,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
    // make sure data can be reshaped, throw exception if not!
    if(src.getMat(i).total() != d) {
        String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)src.getMat(i).total());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // get a hold of the current row
    Mat xi = data.row(i);

@@ -87,7 +87,7 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double

static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
    if(_indices.getMat().type() != CV_32SC1) {
        CV_Error(CV_StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
        CV_Error(Error::StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
    }
    Mat src = _src.getMat();
    std::vector<int> indices = _indices.getMat();

@@ -179,12 +179,12 @@ Mat subspaceProject(InputArray _W, InputArray _mean, InputArray _src) {
    // make sure the data has the correct shape
    if(W.rows != d) {
        String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure mean is correct if not empty
    if(!mean.empty() && (mean.total() != (size_t) d)) {
        String error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %d.", d, mean.total());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // create temporary matrices
    Mat X, Y;

@@ -217,12 +217,12 @@ Mat subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
    // make sure the data has the correct shape
    if(W.cols != d) {
        String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure mean is correct if not empty
    if(!mean.empty() && (mean.total() != (size_t) W.rows)) {
        String error_message = format("Wrong mean shape for the given eigenvector matrix. Expected %d, but was %d.", W.cols, mean.total());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // initalize temporary matrices
    Mat X, Y;

@@ -939,7 +939,7 @@ public:
void LDA::save(const String& filename) const {
    FileStorage fs(filename, FileStorage::WRITE);
    if (!fs.isOpened()) {
        CV_Error(CV_StsError, "File can't be opened for writing!");
        CV_Error(Error::StsError, "File can't be opened for writing!");
    }
    this->save(fs);
    fs.release();

@@ -949,7 +949,7 @@ void LDA::save(const String& filename) const {
void LDA::load(const String& filename) {
    FileStorage fs(filename, FileStorage::READ);
    if (!fs.isOpened())
        CV_Error(CV_StsError, "File can't be opened for writing!");
        CV_Error(Error::StsError, "File can't be opened for writing!");
    this->load(fs);
    fs.release();
}

@@ -1002,12 +1002,12 @@ void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
    // want to separate from each other then?
    if(C == 1) {
        String error_message = "At least two classes are needed to perform a LDA. Reason: Only one class was given!";
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // throw error if less labels, than samples
    if (labels.size() != static_cast<size_t>(N)) {
        String error_message = format("The number of samples must equal the number of labels. Given %d labels, %d samples. ", labels.size(), N);
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    // warn if within-classes scatter matrix becomes singular
    if (N < D) {

@@ -1090,7 +1090,7 @@ void LDA::compute(InputArrayOfArrays _src, InputArray _lbls) {
        break;
    default:
        String error_message= format("InputArray Datatype %d is not supported.", _src.kind());
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
        break;
    }
}


@@ -258,7 +258,7 @@ namespace cv

void Octree::buildTree(const std::vector<Point3f>& points3d, int maxLevels, int _minPoints)
{
    assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
    CV_Assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
    points.resize(points3d.size());
    std::copy(points3d.begin(), points3d.end(), points.begin());
    minPoints = _minPoints;


@@ -450,7 +450,7 @@ bool Retina::_convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::
        inputMatToConvert.convertTo(dst, dsttype);
    }
    else
        CV_Error(CV_StsUnsupportedFormat, "input image must be single channel (gray levels), bgr format (color) or bgra (color with transparency which won't be considered");
        CV_Error(Error::StsUnsupportedFormat, "input image must be single channel (gray levels), bgr format (color) or bgra (color with transparency which won't be considered");

    return imageNumberOfChannels>1; // return bool : false for gray level image processing, true for color mode
}


@@ -422,7 +422,7 @@ bool computeKsi( int transformType,
        computeCFuncPtr = computeC_Translation;
    }
    else
        CV_Error( CV_StsBadFlag, "Unsupported value of transformation type flag.");
        CV_Error(Error::StsBadFlag, "Unsupported value of transformation type flag.");

    Mat C( correspsCount, Cwidth, CV_64FC1 );
    Mat dI_dt( correspsCount, 1, CV_64FC1 );

@@ -56,24 +56,24 @@ namespace
{
    const static Scalar colors[] =
    {
        CV_RGB(255, 0, 0),
        CV_RGB( 0, 255, 0),
        CV_RGB( 0, 0, 255),
        CV_RGB(255, 255, 0),
        CV_RGB(255, 0, 255),
        CV_RGB( 0, 255, 255),
        CV_RGB(255, 127, 127),
        CV_RGB(127, 127, 255),
        CV_RGB(127, 255, 127),
        CV_RGB(255, 255, 127),
        CV_RGB(127, 255, 255),
        CV_RGB(255, 127, 255),
        CV_RGB(127, 0, 0),
        CV_RGB( 0, 127, 0),
        CV_RGB( 0, 0, 127),
        CV_RGB(127, 127, 0),
        CV_RGB(127, 0, 127),
        CV_RGB( 0, 127, 127)
        Scalar(255, 0, 0),
        Scalar( 0, 255, 0),
        Scalar( 0, 0, 255),
        Scalar(255, 255, 0),
        Scalar(255, 0, 255),
        Scalar( 0, 255, 255),
        Scalar(255, 127, 127),
        Scalar(127, 127, 255),
        Scalar(127, 255, 127),
        Scalar(255, 255, 127),
        Scalar(127, 255, 255),
        Scalar(255, 127, 255),
        Scalar(127, 0, 0),
        Scalar( 0, 127, 0),
        Scalar( 0, 0, 127),
        Scalar(127, 127, 0),
        Scalar(127, 0, 127),
        Scalar( 0, 127, 127)
    };
    size_t colors_mum = sizeof(colors)/sizeof(colors[0]);

@@ -199,7 +199,7 @@ void convertTransformMatrix(const float* matrix, float* sseMatrix)

inline __m128 transformSSE(const __m128* matrix, const __m128& in)
{
    assert(((size_t)matrix & 15) == 0);
    CV_DbgAssert(((size_t)matrix & 15) == 0);
    __m128 a0 = _mm_mul_ps(_mm_load_ps((float*)(matrix+0)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(0,0,0,0)));
    __m128 a1 = _mm_mul_ps(_mm_load_ps((float*)(matrix+1)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(1,1,1,1)));
    __m128 a2 = _mm_mul_ps(_mm_load_ps((float*)(matrix+2)), _mm_shuffle_ps(in,in,_MM_SHUFFLE(2,2,2,2)));

@@ -221,8 +221,8 @@ void computeSpinImages( const Octree& Octree, const std::vector<Point3f>& points
    float pixelsPerMeter = 1.f / binSize;
    float support = imageWidth * binSize;

    assert(normals.size() == points.size());
    assert(mask.size() == points.size());
    CV_Assert(normals.size() == points.size());
    CV_Assert(mask.size() == points.size());

    size_t points_size = points.size();
    mask.resize(points_size);

@@ -250,7 +250,7 @@ void computeSpinImages( const Octree& Octree, const std::vector<Point3f>& points
        if (mask[i] == 0)
            continue;

        int t = cvGetThreadNum();
        int t = getThreadNum();
        std::vector<Point3f>& pointsInSphere = pointsInSpherePool[t];

        const Point3f& center = points[i];

@@ -289,7 +289,7 @@ void computeSpinImages( const Octree& Octree, const std::vector<Point3f>& points
    __m128 ppm4 = _mm_set1_ps(pixelsPerMeter);
    __m128i height4m1 = _mm_set1_epi32(spinImage.rows-1);
    __m128i width4m1 = _mm_set1_epi32(spinImage.cols-1);
    assert( spinImage.step <= 0xffff );
    CV_Assert( spinImage.step <= 0xffff );
    __m128i step4 = _mm_set1_epi16((short)step);
    __m128i zero4 = _mm_setzero_si128();
    __m128i one4i = _mm_set1_epi32(1);

@@ -472,7 +472,7 @@ float cv::Mesh3D::estimateResolution(float /*tryRatio*/)

    return resolution = (float)dist[ dist.size() / 2 ];
#else
    CV_Error(CV_StsNotImplemented, "");
    CV_Error(Error::StsNotImplemented, "");
    return 1.f;
#endif
}

@@ -686,16 +686,15 @@ inline float cv::SpinImageModel::groupingCreteria(const Point3f& pointScene1, co
}


cv::SpinImageModel::SpinImageModel(const Mesh3D& _mesh) : mesh(_mesh) , out(0)
cv::SpinImageModel::SpinImageModel(const Mesh3D& _mesh) : mesh(_mesh)
{
    if (mesh.vtx.empty())
        throw Mesh3D::EmptyMeshException();
    defaultParams();
}
cv::SpinImageModel::SpinImageModel() : out(0) { defaultParams(); }
cv::SpinImageModel::~SpinImageModel() {}

void cv::SpinImageModel::setLogger(std::ostream* log) { out = log; }
cv::SpinImageModel::SpinImageModel() { defaultParams(); }
cv::SpinImageModel::~SpinImageModel() {}

void cv::SpinImageModel::defaultParams()
{

@@ -756,7 +755,7 @@ Mat cv::SpinImageModel::packRandomScaledSpins(bool separateScale, size_t xCount,
    int sz = spins.front().cols;

    Mat result((int)(yCount * sz + (yCount - 1)), (int)(xCount * sz + (xCount - 1)), CV_8UC3);
    result = colors[(static_cast<int64>(cvGetTickCount()/cvGetTickFrequency())/1000) % colors_mum];
    result = colors[(static_cast<int64>(getTickCount()/getTickFrequency())/1000) % colors_mum];

    int pos = 0;
    for(int y = 0; y < (int)yCount; ++y)

@@ -1030,12 +1029,8 @@ private:
        matchSpinToModel(scene.spinImages.row(i), indeces, coeffs);
        for(size_t t = 0; t < indeces.size(); ++t)
            allMatches.push_back(Match(i, indeces[t], coeffs[t]));

        if (out) if (i % 100 == 0) *out << "Comparing scene spinimage " << i << " of " << scene.spinImages.rows << std::endl;
    }
    corr_timer.stop();
    if (out) *out << "Spin correlation time = " << corr_timer << std::endl;
    if (out) *out << "Matches number = " << allMatches.size() << std::endl;

    if(allMatches.empty())
        return;

@@ -1046,7 +1041,6 @@ private:
    allMatches.erase(
        remove_if(allMatches.begin(), allMatches.end(), bind2nd(std::less<float>(), maxMeasure * fraction)),
        allMatches.end());
    if (out) *out << "Matches number [filtered by similarity measure] = " << allMatches.size() << std::endl;

    int matchesSize = (int)allMatches.size();
    if(matchesSize == 0)

@@ -1095,15 +1089,12 @@ private:
    allMatches.erase(
        std::remove_if(allMatches.begin(), allMatches.end(), std::bind2nd(std::equal_to<float>(), infinity)),
        allMatches.end());
    if (out) *out << "Matches number [filtered by geometric consistency] = " << allMatches.size() << std::endl;


    matchesSize = (int)allMatches.size();
    if(matchesSize == 0)
        return;

    if (out) *out << "grouping ..." << std::endl;

    Mat groupingMat((int)matchesSize, (int)matchesSize, CV_32F);
    groupingMat = Scalar(0);

@@ -1151,8 +1142,6 @@ private:

    for(int g = 0; g < matchesSize; ++g)
    {
        if (out) if (g % 100 == 0) *out << "G = " << g << std::endl;

        group_t left = allMatchesInds;
        group_t group;

@@ -1201,16 +1190,16 @@ private:

cv::TickMeter::TickMeter() { reset(); }
int64 cv::TickMeter::getTimeTicks() const { return sumTime; }
double cv::TickMeter::getTimeMicro() const { return (double)getTimeTicks()/cvGetTickFrequency(); }
double cv::TickMeter::getTimeMicro() const { return (double)getTimeTicks()/getTickFrequency(); }
double cv::TickMeter::getTimeMilli() const { return getTimeMicro()*1e-3; }
double cv::TickMeter::getTimeSec() const { return getTimeMilli()*1e-3; }
int64 cv::TickMeter::getCounter() const { return counter; }
void cv::TickMeter::reset() {startTime = 0; sumTime = 0; counter = 0; }

void cv::TickMeter::start(){ startTime = cvGetTickCount(); }
void cv::TickMeter::start(){ startTime = getTickCount(); }
void cv::TickMeter::stop()
{
    int64 time = cvGetTickCount();
    int64 time = getTickCount();
    if ( startTime == 0 )
        return;

@@ -1220,4 +1209,4 @@ void cv::TickMeter::stop()
    startTime = 0;
}

std::ostream& cv::operator<<(std::ostream& out, const TickMeter& tm){ return out << tm.getTimeSec() << "sec"; }
//std::ostream& cv::operator<<(std::ostream& out, const TickMeter& tm){ return out << tm.getTimeSec() << "sec"; }


@@ -239,8 +239,8 @@ void StereoVar::VariationalSolver(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)

void StereoVar::VCycle_MyFAS(Mat &I1, Mat &I2, Mat &I2x, Mat &_u, int level)
{
    CvSize imgSize = _u.size();
    CvSize frmSize = cvSize((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5));
    Size imgSize = _u.size();
    Size frmSize = Size((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5));
    Mat I1_h, I2_h, I2x_h, u_h, U, U_h;

    //PRE relaxation

@@ -285,7 +285,7 @@ void StereoVar::VCycle_MyFAS(Mat &I1, Mat &I2, Mat &I2x, Mat &_u, int level)
void StereoVar::FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)
{
    double scale = std::pow(pyrScale, (double) level);
    CvSize frmSize = cvSize((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5));
    Size frmSize = Size((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5));
    Mat I1_h, I2_h, I2x_h, u_h;

    //scaling DOWN

@@ -350,7 +350,7 @@ void StereoVar::autoParams()
void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
{
    CV_Assert(left.size() == right.size() && left.type() == right.type());
    CvSize imgSize = left.size();
    Size imgSize = left.size();
    int MaxD = MAX(labs(minDisp), labs(maxDisp));
    int SignD = 1; if (MIN(minDisp, maxDisp) < 0) SignD = -1;
    if (minDisp >= maxDisp) {MaxD = 256; SignD = 1;}

@@ -378,8 +378,8 @@ void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
        equalizeHist(rightgray, rightgray);
    }
    if (poly_sigma > 0.0001) {
        GaussianBlur(leftgray, leftgray, cvSize(poly_n, poly_n), poly_sigma);
        GaussianBlur(rightgray, rightgray, cvSize(poly_n, poly_n), poly_sigma);
        GaussianBlur(leftgray, leftgray, Size(poly_n, poly_n), poly_sigma);
        GaussianBlur(rightgray, rightgray, Size(poly_n, poly_n), poly_sigma);
    }

    if (flags & USE_AUTO_PARAMS) {


@@ -39,7 +39,7 @@
#include <cstdio>
#include <cstring>
#include <ctime>
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/contrib/compat.hpp"
#include "opencv2/highgui/highgui_c.h"

static void help(char **argv)


@@ -32,7 +32,7 @@ static Mat toGrayscale(InputArray _src) {
    Mat src = _src.getMat();
    // only allow one channel
    if(src.channels() != 1) {
        CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported");
        CV_Error(Error::StsBadArg, "Only Matrices with one channel are supported");
    }
    // create and return normalized image
    Mat dst;

@@ -44,7 +44,7 @@ static void read_csv(const string& filename, vector<Mat>& images, vector<int>& l
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
        CV_Error(Error::StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {

@@ -82,7 +82,7 @@ int main(int argc, const char *argv[]) {
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
        CV_Error(Error::StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original


@@ -178,7 +178,7 @@ int main(int argc, char** argv)
    if( intrinsic_filename )
    {
        // reading intrinsic parameters
        FileStorage fs(intrinsic_filename, CV_STORAGE_READ);
        FileStorage fs(intrinsic_filename, FileStorage::READ);
        if(!fs.isOpened())
        {
            printf("Failed to open file %s\n", intrinsic_filename);

@@ -194,7 +194,7 @@ int main(int argc, char** argv)
    M1 *= scale;
    M2 *= scale;

    fs.open(extrinsic_filename, CV_STORAGE_READ);
    fs.open(extrinsic_filename, FileStorage::READ);
    if(!fs.isOpened())
    {
        printf("Failed to open file %s\n", extrinsic_filename);


@@ -59,15 +59,15 @@ static void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss
    Point org;
    org.x = 1;
    org.y = 3 * fontSize.height * (lineOffsY + 1) / 2;
    putText(img, ss, org, fontFace, fontScale, CV_RGB(0,0,0), 5*fontThickness/2, 16);
    putText(img, ss, org, fontFace, fontScale, Scalar(0,0,0), 5*fontThickness/2, 16);
    putText(img, ss, org, fontFace, fontScale, fontColor, fontThickness, 16);
}


static void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)
{
    Scalar fontColorRed = CV_RGB(255,0,0);
    Scalar fontColorNV = CV_RGB(118,185,0);
    Scalar fontColorRed = Scalar(255,0,0);
    Scalar fontColorNV = Scalar(118,185,0);

    ostringstream ss;
    ss << "FPS = " << setprecision(1) << fixed << fps;