War on Whitespace, master edition: trailing spaces.

This commit is contained in:
Roman Donchenko 2013-08-27 13:57:24 +04:00
parent 2c4bbb313c
commit 9b92545ce6
40 changed files with 263 additions and 263 deletions

View File

@ -27,7 +27,7 @@ endif(WITH_CUDA)
# --- Eigen ---
if(WITH_EIGEN)
find_path(EIGEN_INCLUDE_PATH "Eigen/Core"
PATHS /usr/local /opt /usr $ENV{EIGEN_ROOT}/include ENV ProgramFiles ENV ProgramW6432
PATH_SUFFIXES include/eigen3 include/eigen2 Eigen/include/eigen3 Eigen/include/eigen2
DOC "The path to Eigen3/Eigen2 headers"
CMAKE_FIND_ROOT_PATH_BOTH)

View File

@ -130,7 +130,7 @@ To compile it, assuming OpenCV is correctly installed, use the following command
Here is a code explanation:
Retina definition is present in the bioinspired package and a simple include allows to use it. Alternatively, you can use the specific header *opencv2/bioinspired.hpp*, but then you must also include the other required opencv modules: *opencv2/core.hpp* and *opencv2/highgui.hpp*.
.. code-block:: cpp

View File

@ -125,9 +125,9 @@ designed mostly for development purposes. This approach is deprecated for the pr
release package is recommended to communicate with OpenCV Manager via the async initialization
described above.
#. Add the OpenCV library project to your workspace the same way as for the async initialization
above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
press the :guilabel:`Browse` button and select the OpenCV SDK path
(:file:`OpenCV-2.4.6-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png

View File

@ -47,7 +47,7 @@ using namespace cv;
//////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -529,16 +529,16 @@ int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2, Out
mask4 = (Q.row(2) > 0) & mask4;
mask4 = (Q.row(2) < dist) & mask4;
mask1 = mask1.t();
mask2 = mask2.t();
mask3 = mask3.t();
mask4 = mask4.t();
// If _mask is given, then use it to filter outliers.
if (!_mask.empty())
{
Mat mask = _mask.getMat();
CV_Assert(mask.size() == mask1.size());
bitwise_and(mask, mask1, mask1);
bitwise_and(mask, mask2, mask2);
bitwise_and(mask, mask3, mask3);
@ -546,7 +546,7 @@ int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2, Out
}
if (_mask.empty() && _mask.needed())
{
_mask.create(mask1.size(), CV_8U);
}
CV_Assert(_R.needed() && _t.needed());

View File

@ -47,30 +47,30 @@
This is a translation to C++ of Matlab's LMSolve package by Miroslav Balda.
Here is the original copyright:
============================================================================
Copyright (c) 2007, Miroslav Balda
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
@ -112,7 +112,7 @@ public:
gemm(J, r, 1, noArray(), 0, v, GEMM_1_T);
Mat D = A.diag().clone();
const double Rlo = 0.25, Rhi = 0.75;
double lambda = 1, lc = 0.75;
int i, iter = 0;
@ -222,5 +222,5 @@ Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters)
CV_Assert( !LMSolverImpl_info_auto.name().empty() );
return new LMSolverImpl(cb, maxIters);
}
}
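For context, here is a minimal editorial sketch (not part of this commit) of how the LMSolver interface driven by createLMSolver above might be used. It assumes the LMSolver/createLMSolver declarations, which in this tree live in calib3d's internal precomp.hpp, are visible to the caller; it fits y = a*exp(b*t) to sampled data by filling the residual vector and Jacobian in the callback.

#include <opencv2/core.hpp>
#include <cmath>
#include <vector>

// hypothetical least-squares callback for y = a*exp(b*t)
class ExpFitCallback : public cv::LMSolver::Callback
{
public:
    ExpFitCallback(const std::vector<double>& t, const std::vector<double>& y)
        : t_(t), y_(y) {}

    bool compute(cv::InputArray _param, cv::OutputArray _err, cv::OutputArray _J) const
    {
        cv::Mat param = _param.getMat();
        double a = param.at<double>(0), b = param.at<double>(1);
        int n = (int)t_.size();

        _err.create(n, 1, CV_64F);
        cv::Mat err = _err.getMat(), J;
        if (_J.needed())
        {
            _J.create(n, 2, CV_64F);
            J = _J.getMat();
        }
        for (int i = 0; i < n; i++)
        {
            double e = std::exp(b * t_[i]);
            err.at<double>(i) = a * e - y_[i];      // residual for sample i
            if (J.data)
            {
                J.at<double>(i, 0) = e;             // d(residual)/d(a)
                J.at<double>(i, 1) = a * t_[i] * e; // d(residual)/d(b)
            }
        }
        return true;
    }
private:
    std::vector<double> t_, y_;
};

// usage sketch: cv::Mat param = (cv::Mat_<double>(2, 1) << 1, 0);  // initial guess
//               cv::createLMSolver(cv::Ptr<cv::LMSolver::Callback>(new ExpFitCallback(t, y)), 100)->run(param);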

View File

@ -52,30 +52,30 @@ TEST(Calib3d_Affine3f, accuracy)
cv::Mat expected;
cv::Rodrigues(rvec, expected);
ASSERT_EQ(0, norm(cv::Mat(affine.matrix, false).colRange(0, 3).rowRange(0, 3) != expected));
ASSERT_EQ(0, norm(cv::Mat(affine.linear()) != expected));
cv::Matx33d R = cv::Matx33d::eye();
double angle = 50;
R.val[0] = R.val[4] = std::cos(CV_PI*angle/180.0);
R.val[3] = std::sin(CV_PI*angle/180.0);
R.val[1] = -R.val[3];
cv::Affine3d affine1(cv::Mat(cv::Vec3d(0.2, 0.5, 0.3)).reshape(1, 1), cv::Vec3d(4, 5, 6));
cv::Affine3d affine2(R, cv::Vec3d(1, 1, 0.4));
cv::Affine3d result = affine1.inv() * affine2;
expected = cv::Mat(affine1.matrix.inv(cv::DECOMP_SVD)) * cv::Mat(affine2.matrix, false);
cv::Mat diff;
cv::absdiff(expected, result.matrix, diff);
ASSERT_LT(cv::norm(diff, cv::NORM_INF), 1e-15);
}

View File

@ -83,4 +83,4 @@ Draws keypoints.
:param flags: Flags setting drawing features. Possible ``flags`` bit values are defined by ``DrawMatchesFlags``. See details above in :ocv:func:`drawMatches` .
.. note:: For the Python API, the flags are exposed as `cv2.DRAW_MATCHES_FLAGS_DEFAULT`, `cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS`, `cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG`, `cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS`
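A short C++ illustration of these flags (an editorial sketch, not part of this commit; the input file name is a placeholder):

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("box.png", cv::IMREAD_GRAYSCALE);
    std::vector<cv::KeyPoint> keypoints;
    cv::ORB orb;                  // any feature detector would do here
    orb.detect(img, keypoints);

    cv::Mat out;
    // DRAW_RICH_KEYPOINTS also renders each keypoint's size and orientation
    cv::drawKeypoints(img, keypoints, out, cv::Scalar::all(-1),
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imwrite("keypoints.png", out);
    return 0;
}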

View File

@ -120,7 +120,7 @@ Finds keypoints in an image and computes their descriptors
:param descriptors: The output descriptors. Pass ``cv::noArray()`` if you do not need it.
:param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
BRISK
-----

View File

@ -1212,7 +1212,7 @@ static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
#if LIBAVUTIL_BUILD > CALC_FFMPEG_VERSION(51,11,0)
/* Some settings for libx264 encoding, restore dummy values for gop_size
and qmin since they will be set to reasonable defaults by the libx264
preset system. Also, use a crf encode with the default quality rating,
this seems easier than finding an appropriate default bitrate. */
if (c->codec_id == CODEC_ID_H264) {
c->gop_size = -1;

View File

@ -789,7 +789,7 @@ The function supports the in-place mode. Dilation can be applied several ( ``ite
* An example using the morphological dilate operation can be found at opencv_source_code/samples/cpp/morphology2.cpp
erode
@ -1000,17 +1000,17 @@ Returns Gabor filter coefficients.
.. ocv:pyfunction:: cv2.getGaborKernel(ksize, sigma, theta, lambd, gamma[, psi[, ktype]]) -> retval
:param ksize: Size of the filter returned.
:param sigma: Standard deviation of the Gaussian envelope.
:param theta: Orientation of the normal to the parallel stripes of a Gabor function.
:param lambd: Wavelength of the sinusoidal factor.
:param gamma: Spatial aspect ratio.
:param psi: Phase offset.
:param ktype: Type of filter coefficients. It can be ``CV_32F`` or ``CV_64F`` .
For more details about Gabor filter equations and parameters, see: `Gabor Filter <http://en.wikipedia.org/wiki/Gabor_filter>`_.
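For instance, a minimal sketch (editorial, not part of this commit) that builds a Gabor kernel with the parameters documented above and applies it with filter2D:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

void applyGabor(const cv::Mat& src, cv::Mat& dst)
{
    // 31x31 kernel, sigma = 4, 45-degree orientation (theta),
    // wavelength (lambd) = 10, aspect ratio (gamma) = 0.5, phase offset (psi) = 0
    cv::Mat kernel = cv::getGaborKernel(cv::Size(31, 31), 4.0, CV_PI / 4,
                                        10.0, 0.5, 0, CV_32F);
    cv::filter2D(src, dst, CV_32F, kernel);
}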
@ -1132,7 +1132,7 @@ Performs advanced morphological transformations.
:param dst: Destination image of the same size and type as ``src`` .
:param kernel: Structuring element. It can be created using :ocv:func:`getStructuringElement`.
:param anchor: Anchor position within the kernel. Negative values mean that the anchor is at the kernel center.
:param op: Type of a morphological operation that can be one of the following:

View File

@ -553,9 +553,9 @@ Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
.. ocv:cfunction:: void cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] )
:param box: The input rotated rectangle. It may be the output of :ocv:func:`minAreaRect`.
:param points: The output array of four vertices of rectangles.
The function finds the four vertices of a rotated rectangle. This function is useful to draw the rectangle. In C++, instead of using this function, you can directly use the box.points() method. Please visit the `tutorial on bounding rectangle <http://docs.opencv.org/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.html#bounding-rects-circles>`_ for more information.
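A brief sketch of that C++ route (editorial illustration, not part of this commit), using minAreaRect() and RotatedRect::points() to draw the rotated bounding rectangle of a contour:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

void drawRotatedBox(cv::Mat& img, const std::vector<cv::Point>& contour)
{
    cv::RotatedRect box = cv::minAreaRect(contour);
    cv::Point2f vtx[4];
    box.points(vtx);              // the C++ equivalent of cvBoxPoints
    for (int i = 0; i < 4; i++)
        cv::line(img, vtx[i], vtx[(i + 1) % 4], cv::Scalar(0, 255, 0), 2);
}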

View File

@ -399,7 +399,7 @@ int cv::connectedComponentsWithStats(InputArray _img, OutputArray _labels, Outpu
const cv::Mat img = _img.getMat();
_labels.create(img.size(), CV_MAT_DEPTH(ltype));
cv::Mat labels = _labels.getMat();
connectedcomponents::CCStatsOp sop(statsv, centroids);
if(ltype == CV_16U){
return connectedComponents_sub1(img, labels, connectivity, sop);
}else if(ltype == CV_32S){

View File

@ -571,14 +571,14 @@ static void fitLine3D( Point3f * points, int count, int dist,
for( j = 0; j < count; j++ )
w[j] = 1.f;
}
/* save the line parameters */
memcpy( _lineprev, _line, 6 * sizeof( float ));
/* Run again... */
fitLine3D_wods( points, count, w, _line );
}
if( err < min_err )
{
min_err = err;
@ -595,27 +595,27 @@ void cv::fitLine( InputArray _points, OutputArray _line, int distType,
double param, double reps, double aeps )
{
Mat points = _points.getMat();
float linebuf[6]={0.f};
int npoints2 = points.checkVector(2, -1, false);
int npoints3 = points.checkVector(3, -1, false);
CV_Assert( npoints2 >= 0 || npoints3 >= 0 );
if( points.depth() != CV_32F || !points.isContinuous() )
{
Mat temp;
points.convertTo(temp, CV_32F);
points = temp;
}
if( npoints2 >= 0 )
fitLine2D( points.ptr<Point2f>(), npoints2, distType,
(float)param, (float)reps, (float)aeps, linebuf);
else
fitLine3D( points.ptr<Point3f>(), npoints3, distType,
(float)param, (float)reps, (float)aeps, linebuf);
Mat(npoints2 >= 0 ? 4 : 6, 1, CV_32F, linebuf).copyTo(_line);
}

View File

@ -142,7 +142,7 @@ double cv::matchShapes(InputArray contour1, InputArray contour2, int method, dou
default:
CV_Error( CV_StsBadArg, "Unknown comparison method" );
}
return result;
}

View File

@ -159,7 +159,7 @@ static Moments contourMoments( const Mat& contour )
if( fabs(a00) > FLT_EPSILON )
{
double db1_2, db1_6, db1_12, db1_24, db1_20, db1_60;
if( a00 > 0 )
{
db1_2 = 0.5;
@ -464,7 +464,7 @@ cv::Moments cv::moments( InputArray _src, bool binary )
m.m03 += mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym));
}
}
completeMomentState( &m );
return m;
}

View File

@ -204,7 +204,7 @@ pyrDown_( const Mat& _src, Mat& _dst, int borderType )
CastOp castOp;
VecOp vecOp;
CV_Assert( ssize.width > 0 && ssize.height > 0 &&
std::abs(dsize.width*2 - ssize.width) <= 2 &&
std::abs(dsize.height*2 - ssize.height) <= 2 );
int k, x, sy0 = -PD_SZ/2, sy = sy0, width0 = std::min((ssize.width-PD_SZ/2-1)/2 + 1, dsize.width);

View File

@ -327,7 +327,7 @@ void cv::pyrMeanShiftFiltering( InputArray _src, OutputArray _dst,
double sr2 = sr * sr;
int isr2 = cvRound(sr2), isr22 = MAX(isr2,16);
int tab[768];
if( src0.type() != CV_8UC3 )
CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 3-channel images are supported" );

View File

@ -1690,7 +1690,7 @@ TEST(Imgproc_ColorBayer, regression)
Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE);
Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", IMREAD_UNCHANGED);
Mat result;
CV_Assert(given.data != NULL && gold.data != NULL);
cvtColor(given, result, CV_BayerBG2GRAY);

View File

@ -52,12 +52,12 @@ namespace cv
{
/*!
Extremal Region Stat structure
The ERStat structure represents a class-specific Extremal Region (ER).
An ER is a 4-connected set of pixels with all its grey-level values smaller than the values
in its outer boundary. A class-specific ER is selected (using a classifier) from all the ERs
in the component tree of the image.
*/
struct CV_EXPORTS ERStat
@ -69,17 +69,17 @@ public:
~ERStat(){};
//! seed point and the threshold (max grey-level value)
int pixel;
int level;
//! incrementally computable features
int area;
int perimeter;
int euler; //!< euler number
Rect rect;
double raw_moments[2]; //!< order 1 raw moments to derive the centroid
double central_moments[3]; //!< order 2 central moments to construct the covariance matrix
std::deque<int> *crossings;//!< horizontal crossings
float med_crossings; //!< median of the crossings at three different height levels
//! 2nd stage features
@ -88,21 +88,21 @@ public:
float num_inflexion_points;
// TODO Other features can be added (average color, standard deviation, and such)
// TODO shall we include the pixel list whenever available (i.e. after 2nd stage) ?
std::vector<int> *pixels;
//! probability that the ER belongs to the class we are looking for
double probability;
//! pointers preserving the tree structure of the component tree
ERStat* parent;
ERStat* child;
ERStat* next;
ERStat* prev;
//! whether the region is a local maximum of the probability
bool local_maxima;
ERStat* max_probability_ancestor;
ERStat* min_probability_ancestor;
@ -124,11 +124,11 @@ public:
public:
virtual ~Callback(){};
//! The classifier must return probability measure for the region.
virtual double eval(const ERStat& stat) = 0; //const = 0; //TODO why cannot use const = 0 here?
};
/*!
the key method. Takes image on input and returns the selected regions in a vector of ERStat
only distinctive ERs which correspond to characters are selected by a sequential classifier
\param image is the input image
\param regions is output for the first stage, input/output for the second one.
@ -151,15 +151,15 @@ public:
/*!
Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
The component tree of the image is extracted by a threshold increased step by step
from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness,
number of holes, and number of horizontal crossings) are computed for each ER
and used as features for a classifier which estimates the class-conditional
probability P(er|character). The value of P(er|character) is tracked using the inclusion
relation of ER across all thresholds and only the ERs which correspond to local maximum
of the probability P(er|character) are selected (if the local maximum of the
probability is above a global limit pmin and the difference between local maximum and
local minimum is greater than minProbabilityDiff).
\param cb Callback with the classifier.
@ -168,29 +168,29 @@ public:
\param minArea The minimum area (% of image size) allowed for retrieved ER's
\param maxArea The maximum area (% of image size) allowed for retrieved ER's
\param minProbability The minimum probability P(er|character) allowed for retrieved ER's
\param nonMaxSuppression Whether non-maximum suppression is done over the branch probabilities
\param minProbabilityDiff The minimum probability difference between local maxima and local minima ERs
*/
CV_EXPORTS Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb = NULL,
int thresholdDelta = 1, float minArea = 0.000025,
float maxArea = 0.13, float minProbability = 0.2,
bool nonMaxSuppression = true,
float minProbabilityDiff = 0.1);
/*!
Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
In the second stage, the ERs that passed the first stage are classified into character
and non-character classes using more informative but also more computationally expensive
features. The classifier uses all the features calculated in the first stage and the following
additional features: hole area ratio, convex hull ratio, and number of outer inflexion points.
\param cb Callback with the classifier
if omitted, tries to load a default classifier from file trained_classifierNM2.xml
\param minProbability The minimum probability P(er|character) allowed for retrieved ER's
*/
CV_EXPORTS Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb = NULL,
float minProbability = 0.85);
}
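A hedged usage sketch of the two-stage pipeline declared above (editorial, not part of this commit; the full demo lives in samples/cpp/erfilter.cpp, and the header path is assumed from where erfilter.hpp sits in this tree):

#include <opencv2/core.hpp>
#include <opencv2/objdetect.hpp>   // assumption: ERFilter is reachable via objdetect here
#include <vector>

void detectCharRegions(const cv::Mat& grey, std::vector<cv::ERStat>& regions)
{
    // stage 1: extract the component tree and keep probable character ERs
    cv::Ptr<cv::ERFilter> stage1 = cv::createERFilterNM1();
    stage1->run(grey, regions);

    // stage 2: re-classify the surviving regions with the costlier features
    cv::Ptr<cv::ERFilter> stage2 = cv::createERFilterNM2();
    stage2->run(grey, regions);
}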

View File

@ -181,7 +181,7 @@ void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps
int n1 = rweights[i];
double w1 = rejectWeights[i];
int l1 = rejectLevels[i];
// filter out rectangles which don't have enough similar rectangles
if( n1 <= groupThreshold )
continue;

View File

@ -48,9 +48,9 @@ using namespace std;
namespace cv
{
ERStat::ERStat(int init_level, int init_pixel, int init_x, int init_y) : pixel(init_pixel),
level(init_level), area(0), perimeter(0), euler(0), probability(1.0),
parent(0), child(0), next(0), prev(0), local_maxima(0),
max_probability_ancestor(0), min_probability_ancestor(0)
{
rect = Rect(init_x,init_y,1,1);
@ -76,17 +76,17 @@ public:
//Destructor
~ERFilterNM() {};
float minProbability;
bool nonMaxSuppression;
float minProbabilityDiff;
// the key method. Takes image on input, vector of ERStat is output for the first stage,
// input/output - for the second one.
void run( InputArray image, std::vector<ERStat>& regions );
protected:
int thresholdDelta;
float maxArea;
float minArea;
Ptr<ERFilter::Callback> classifier;
@ -116,8 +116,8 @@ private:
// extract the component tree and store all the ER regions
void er_tree_extract( InputArray image );
// accumulate a pixel into an ER
void er_add_pixel( ERStat *parent, int x, int y, int non_boundary_neighbours,
int non_boundary_neighbours_horiz,
int d_C1, int d_C2, int d_C3 );
// merge an ER with its nested parent
void er_merge( ERStat *parent, ERStat *child );
@ -133,7 +133,7 @@ private:
// default 1st stage classifier
class CV_EXPORTS ERClassifierNM1 : public ERFilter::Callback
{
public:
//Constructor
@ -142,14 +142,14 @@ public:
~ERClassifierNM1() {};
// The classifier must return probability measure for the region.
double eval(const ERStat& stat);
private:
CvBoost boost;
};
// default 2nd stage classifier
class CV_EXPORTS ERClassifierNM2 : public ERFilter::Callback
{
public:
//constructor
@ -158,7 +158,7 @@ public:
~ERClassifierNM2() {};
// The classifier must return probability measure for the region.
double eval(const ERStat& stat);
private:
CvBoost boost;
@ -182,7 +182,7 @@ ERFilterNM::ERFilterNM()
classifier = NULL;
}
// the key method. Takes image on input, vector of ERStat is output for the first stage,
// input/output for the second one.
void ERFilterNM::run( InputArray image, std::vector<ERStat>& _regions )
{
@ -192,7 +192,7 @@ void ERFilterNM::run( InputArray image, std::vector<ERStat>& _regions )
regions = &_regions;
region_mask = Mat::zeros(image.getMat().rows+2, image.getMat().cols+2, CV_8UC1);
// if regions vector is empty we must extract the entire component tree
if ( regions->size() == 0 )
{
@ -237,13 +237,13 @@ void ERFilterNM::er_tree_extract( InputArray image )
src = (image.getMat() / thresholdDelta) -1;
}
const unsigned char * image_data = src.data;
int width = src.cols, height = src.rows;
// the component stack
vector<ERStat*> er_stack;
//the quads for euler number calculation
unsigned char quads[3][4];
quads[0][0] = 1 << 3;
quads[0][1] = 1 << 2;
@ -271,32 +271,32 @@ void ERFilterNM::er_tree_extract( InputArray image )
// we'll look initially for all pixels with grey-level lower than a grey-level higher than any allowed in the image
int threshold_level = (255/thresholdDelta)+1;
// starting from the first pixel (0,0)
int current_pixel = 0;
int current_edge = 0;
int current_level = image_data[0];
accessible_pixel_mask[0] = true;
bool push_new_component = true;
for (;;) {
int x = current_pixel % width;
int y = current_pixel / width;
// push a component with current level in the component stack
if (push_new_component)
er_stack.push_back(new ERStat(current_level, current_pixel, x, y));
push_new_component = false;
// explore the (remaining) edges to the neighbors to the current pixel
for (current_edge = current_edge; current_edge < 4; current_edge++)
{
int neighbour_pixel = current_pixel;
switch (current_edge)
{
case 0: if (x < width - 1) neighbour_pixel = current_pixel + 1; break;
case 1: if (y < height - 1) neighbour_pixel = current_pixel + width; break;
@ -305,46 +305,46 @@ void ERFilterNM::er_tree_extract( InputArray image )
}
// if neighbour is not accessible, mark it accessible and retrieve its grey-level value
if ( !accessible_pixel_mask[neighbour_pixel] && (neighbour_pixel != current_pixel) )
{
int neighbour_level = image_data[neighbour_pixel];
accessible_pixel_mask[neighbour_pixel] = true;
// if neighbour level is not lower than current level add neighbour to the boundary heap
if (neighbour_level >= current_level)
{
boundary_pixes[neighbour_level].push_back(neighbour_pixel);
boundary_edges[neighbour_level].push_back(0);
// if neighbour level is lower than our threshold_level set threshold_level to neighbour level
if (neighbour_level < threshold_level)
threshold_level = neighbour_level;
}
else // if neighbour level is lower than current add current_pixel (and next edge)
// to the boundary heap for later processing
{
boundary_pixes[current_level].push_back(current_pixel);
boundary_edges[current_level].push_back(current_edge + 1);
// if neighbour level is lower than threshold_level set threshold_level to neighbour level
if (current_level < threshold_level)
threshold_level = current_level;
// consider the new pixel and its grey-level as current pixel
current_pixel = neighbour_pixel;
current_edge = 0;
current_level = neighbour_level;
// and push a new component
push_new_component = true;
break;
}
}
} // else neighbour was already accessible
if (push_new_component) continue;
@ -363,12 +363,12 @@ void ERFilterNM::er_tree_extract( InputArray image )
quad_after[2] = 1<<2;
quad_after[3] = 1;
for (int edge = 0; edge < 8; edge++)
{
int neighbour4 = -1;
int neighbour8 = -1;
int cell = 0;
switch (edge)
{
case 0: if (x < width - 1) { neighbour4 = neighbour8 = current_pixel + 1;} cell = 5; break;
case 1: if ((x < width - 1)&&(y < height - 1)) { neighbour8 = current_pixel + 1 + width;} cell = 8; break;
@ -391,7 +391,7 @@ void ERFilterNM::er_tree_extract( InputArray image )
{
if (accumulated_pixel_mask[neighbour8])
pix_value = image_data[neighbour8];
}
if (pix_value<=image_data[current_pixel])
{
@ -453,18 +453,18 @@ void ERFilterNM::er_tree_extract( InputArray image )
C_before[p]++;
if ( (quad_before[1] == quads[p][q]) && ((p<2)||(q<2)) )
C_before[p]++;
if ( (quad_before[2] == quads[p][q]) && ((p<2)||(q<2)) )
C_before[p]++;
if ( (quad_before[3] == quads[p][q]) && ((p<2)||(q<2)) )
C_before[p]++;
if ( (quad_after[0] == quads[p][q]) && ((p<2)||(q<2)) )
C_after[p]++;
if ( (quad_after[1] == quads[p][q]) && ((p<2)||(q<2)) )
C_after[p]++;
if ( (quad_after[2] == quads[p][q]) && ((p<2)||(q<2)) )
C_after[p]++;
if ( (quad_after[3] == quads[p][q]) && ((p<2)||(q<2)) )
C_after[p]++;
}
}
@ -475,9 +475,9 @@ void ERFilterNM::er_tree_extract( InputArray image )
er_add_pixel(er_stack.back(), x, y, non_boundary_neighbours, non_boundary_neighbours_horiz, d_C1, d_C2, d_C3);
accumulated_pixel_mask[current_pixel] = true;
// if we have processed all the possible threshold levels (the heap is empty) we are done!
if (threshold_level == (255/thresholdDelta)+1)
{
// save the extracted regions into the output vector
@ -490,18 +490,18 @@ void ERFilterNM::er_tree_extract( InputArray image )
return;
}
// pop the heap of boundary pixels
current_pixel = boundary_pixes[threshold_level].back();
boundary_pixes[threshold_level].erase(boundary_pixes[threshold_level].end()-1);
current_edge = boundary_edges[threshold_level].back();
boundary_edges[threshold_level].erase(boundary_edges[threshold_level].end()-1);
while (boundary_pixes[threshold_level].empty() && (threshold_level < (255/thresholdDelta)+1))
threshold_level++;
int new_level = image_data[current_pixel];
// if the new pixel has higher grey value than the current one
@ -514,11 +514,11 @@ void ERFilterNM::er_tree_extract( InputArray image )
{
ERStat* er = er_stack.back();
er_stack.erase(er_stack.end()-1);
if (new_level < er_stack.back()->level)
{
er_stack.push_back(new ERStat(new_level, current_pixel, current_pixel%width, current_pixel/width));
er_merge(er_stack.back(), er);
break;
}
@ -531,8 +531,8 @@ void ERFilterNM::er_tree_extract( InputArray image )
}
// accumulate a pixel into an ER
void ERFilterNM::er_add_pixel(ERStat *parent, int x, int y, int non_border_neighbours,
int non_border_neighbours_horiz,
int d_C1, int d_C2, int d_C3)
{
parent->area++;
@ -575,7 +575,7 @@ void ERFilterNM::er_merge(ERStat *parent, ERStat *child)
parent->area += child->area;
parent->perimeter += child->perimeter;
for (int i=parent->rect.y; i<=min(parent->rect.br().y-1,child->rect.br().y-1); i++)
if (i-child->rect.y >= 0)
@ -584,12 +584,12 @@ void ERFilterNM::er_merge(ERStat *parent, ERStat *child)
for (int i=parent->rect.y-1; i>=child->rect.y; i--)
if (i-child->rect.y < (int)child->crossings->size())
parent->crossings->push_front(child->crossings->at(i-child->rect.y));
else
parent->crossings->push_front(0);
for (int i=parent->rect.br().y; i<child->rect.y; i++)
parent->crossings->push_back(0);
for (int i=max(parent->rect.br().y,child->rect.y); i<=child->rect.br().y-1; i++)
parent->crossings->push_back(child->crossings->at(i-child->rect.y));
@ -618,8 +618,8 @@ void ERFilterNM::er_merge(ERStat *parent, ERStat *child)
std::sort(m_crossings.begin(), m_crossings.end());
child->med_crossings = (float)m_crossings.at(1);
// free unnecessary mem
child->crossings->clear();
delete(child->crossings);
child->crossings = NULL;
@ -632,15 +632,15 @@ void ERFilterNM::er_merge(ERStat *parent, ERStat *child)
child->probability = classifier->eval(*child);
}
if ( ((classifier!=NULL)?(child->probability >= minProbability):true) &&
((child->area >= (minArea*region_mask.rows*region_mask.cols)) &&
(child->area <= (maxArea*region_mask.rows*region_mask.cols))) )
{
num_accepted_regions++;
child->next = parent->child;
if (parent->child)
parent->child->prev = child;
parent->child = child;
child->parent = parent;
@ -658,7 +658,7 @@ void ERFilterNM::er_merge(ERStat *parent, ERStat *child)
while (new_child->next != NULL)
new_child = new_child->next;
new_child->next = parent->child;
if (parent->child)
parent->child->prev = new_child;
parent->child = child->child;
child->child->parent = parent;
@ -672,8 +672,8 @@ void ERFilterNM::er_merge(ERStat *parent, ERStat *child)
child->crossings = NULL;
}
delete(child);
}
}
// recursively walk the tree and clean memory
@ -691,11 +691,11 @@ void ERFilterNM::er_tree_clean( ERStat *stat )
}
delete stat;
}
// copy extracted regions into the output vector
ERStat* ERFilterNM::er_save( ERStat *er, ERStat *parent, ERStat *prev )
{
regions->push_back(*er);
regions->back().parent = parent;
@ -714,7 +714,7 @@ ERStat* ERFilterNM::er_save( ERStat *er, ERStat *parent, ERStat *prev )
this_er->probability = 0; //TODO this makes sense in order to select at least one region in short tree's but is it really necessary?
this_er->max_probability_ancestor = this_er;
this_er->min_probability_ancestor = this_er;
}
else
{
this_er->max_probability_ancestor = (this_er->probability > parent->max_probability_ancestor->probability)? this_er : parent->max_probability_ancestor;
@ -730,11 +730,11 @@ ERStat* ERFilterNM::er_save( ERStat *er, ERStat *parent, ERStat *prev )
// this_er->min_probability_ancestor->local_maxima = false;
this_er->max_probability_ancestor = this_er;
this_er->min_probability_ancestor = this_er;
}
}
}
for (ERStat * child = er->child; child; child = child->next)
{
old_prev = er_save(child, this_er, old_prev);
@ -749,16 +749,16 @@ ERStat* ERFilterNM::er_tree_filter ( InputArray image, ERStat * stat, ERStat *pa
Mat src = image.getMat();
// assert correct image type
CV_Assert( src.type() == CV_8UC1 );
//Fill the region and calculate 2nd stage features
Mat region = region_mask(Rect(Point(stat->rect.x,stat->rect.y),Point(stat->rect.br().x+2,stat->rect.br().y+2)));
region = Scalar(0);
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
Rect rect;
floodFill( src(Rect(Point(stat->rect.x,stat->rect.y),Point(stat->rect.br().x,stat->rect.br().y))),
region, Point(stat->pixel%src.cols - stat->rect.x, stat->pixel/src.cols - stat->rect.y),
Scalar(255), &rect, Scalar(stat->level), Scalar(0), flags );
rect.width += 2;
rect.height += 2;
@ -768,9 +768,9 @@ ERStat* ERFilterNM::er_tree_filter ( InputArray image, ERStat * stat, ERStat *pa
vector<Point> contour_poly;
vector<Vec4i> hierarchy;
findContours( region, contours, hierarchy, RETR_TREE, CHAIN_APPROX_NONE, Point(0, 0) );
//TODO check epsilon parameter of approxPolyDP (set empirically) : we want more precision
// if the region is very small because otherwise we'll lose all the convexities
approxPolyDP( Mat(contours[0]), contour_poly, max(rect.width,rect.height)/25, true );
bool was_convex = false;
@ -829,11 +829,11 @@ ERStat* ERFilterNM::er_tree_filter ( InputArray image, ERStat * stat, ERStat *pa
if ( (classifier != NULL) && (stat->parent != NULL) )
{
stat->probability = classifier->eval(*stat);
}
if ( ( ((classifier != NULL)?(stat->probability >= minProbability):true) &&
((stat->area >= minArea*region_mask.rows*region_mask.cols) &&
(stat->area <= maxArea*region_mask.rows*region_mask.cols)) ) ||
(stat->parent == NULL) )
{
@ -979,19 +979,19 @@ int ERFilterNM::getNumRejected()
ERClassifierNM1::ERClassifierNM1()
{
if (ifstream("./trained_classifierNM1.xml"))
{
// The file with default classifier exists
boost.load("./trained_classifierNM1.xml", "boost");
}
else if (ifstream("./training/trained_classifierNM1.xml"))
{
// The file with default classifier exists
boost.load("./training/trained_classifierNM1.xml", "boost");
}
else
{
// File not found
CV_Error(CV_StsBadArg, "Default classifier ./trained_classifierNM1.xml not found!");
}
};
@ -1017,19 +1017,19 @@ double ERClassifierNM1::eval(const ERStat& stat)
ERClassifierNM2::ERClassifierNM2()
{
if (ifstream("./trained_classifierNM2.xml"))
{
// The file with default classifier exists
boost.load("./trained_classifierNM2.xml", "boost");
}
else if (ifstream("./training/trained_classifierNM2.xml"))
{
// The file with default classifier exists
boost.load("./training/trained_classifierNM2.xml", "boost");
}
else
{
// File not found
CV_Error(CV_StsBadArg, "Default classifier ./trained_classifierNM2.xml not found!");
}
};
@ -1040,7 +1040,7 @@ double ERClassifierNM2::eval(const ERStat& stat)
float arr[] = {0,(float)(stat.rect.width)/(stat.rect.height), // aspect ratio
sqrt((float)(stat.area))/stat.perimeter, // compactness
(float)(1-stat.euler), //number of holes
stat.med_crossings, stat.hole_area_ratio,
stat.convex_hull_ratio, stat.num_inflexion_points};
vector<float> sample (arr, arr + sizeof(arr) / sizeof(arr[0]) );
@ -1055,15 +1055,15 @@ double ERClassifierNM2::eval(const ERStat& stat)
/*!
Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
The component tree of the image is extracted by a threshold increased step by step
from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness,
number of holes, and number of horizontal crossings) are computed for each ER
and used as features for a classifier which estimates the class-conditional
probability P(er|character). The value of P(er|character) is tracked using the inclusion
relation of ER across all thresholds and only the ERs which correspond to local maximum
of the probability P(er|character) are selected (if the local maximum of the
probability is above a global limit pmin and the difference between local maximum and
local minimum is greater than minProbabilityDiff).
\param cb Callback with the classifier.
@ -1072,11 +1072,11 @@ double ERClassifierNM2::eval(const ERStat& stat)
\param minArea The minimum area (% of image size) allowed for retrieved ER's
\param maxArea The maximum area (% of image size) allowed for retrieved ER's
\param minProbability The minimum probability P(er|character) allowed for retrieved ER's
\param nonMaxSuppression Whether non-maximum suppression is done over the branch probabilities
\param minProbabilityDiff The minimum probability difference between local maxima and local minima ERs
*/
Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb, int thresholdDelta,
float minArea, float maxArea, float minProbability,
bool nonMaxSuppression, float minProbabilityDiff)
{
@ -1086,7 +1086,7 @@ Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb, int threshold
CV_Assert( (minProbabilityDiff >= 0.) && (minProbabilityDiff <= 1.) );
Ptr<ERFilterNM> filter = new ERFilterNM();
if (cb == NULL)
filter->setCallback(new ERClassifierNM1());
else
@ -1105,9 +1105,9 @@ Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb, int threshold
Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
In the second stage, the ERs that passed the first stage are classified into character
and non-character classes using more informative but also more computationally expensive
features. The classifier uses all the features calculated in the first stage and the following
additional features: hole area ratio, convex hull ratio, and number of outer inflexion points.
\param cb Callback with the classifier
@ -1121,7 +1121,7 @@ Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb, float minProb
Ptr<ERFilterNM> filter = new ERFilterNM();
if (cb == NULL)
filter->setCallback(new ERClassifierNM2());
else

View File

@ -151,9 +151,9 @@ Returns void
:param temp1: Convolution kernel, a single-channel floating point matrix. The size is not greater than the ``image`` size. The type is the same as ``image``.
:param result: The destination image
:param ccorr: Flags to evaluate cross-correlation instead of convolution.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`ocl::ConvolveBuf`.
Convolves an image with the kernel. Supports only the CV_32FC1 data type and does not support ROI.

View File

@ -1782,7 +1782,7 @@ namespace cv
};
//! Returns the sorted result of all the elements in input based on equivalent keys.
//
// The element unit in the values to be sorted is determined from the data type,
// i.e., a CV_32FC2 input {a1a2, b1b2} will be considered as two elements, regardless of its
// matrix dimension.
// both keys and values will be sorted in place

View File

@ -189,7 +189,7 @@ void cv::ocl::oclMat::upload(const Mat &m)
temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE,
(pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
openCLVerifyCall(err);
openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step,
wholeSize.width * m.elemSize(), wholeSize.height, clMemcpyHostToDevice, 3);
}
else{
@ -198,7 +198,7 @@ void cv::ocl::oclMat::upload(const Mat &m)
openCLVerifyCall(err);
}
convert_C3C4(temp, *this);
openCLSafeCall(clReleaseMemObject(temp));
}

View File

@ -14,20 +14,20 @@ static void print_matrix(const Mat& x){
}
static void print_simplex_state(const Mat& c,const Mat& b,double v,const std::vector<int> N,const std::vector<int> B){
printf("\tprint simplex state\n");
printf("v=%g\n",v);
printf("here c goes\n");
print_matrix(c);
printf("non-basic: ");
print(Mat(N));
printf("\n");
printf("here b goes\n");
print_matrix(b);
printf("basic: ");
print(Mat(B));
printf("\n");
}
@ -185,7 +185,7 @@ static int initialize_simplex(Mat_<double>& c, Mat_<double>& b,double& v,vector<
if(indexToRow[I]<N.size()){
dprintf(("I=%d from nonbasic\n",I));
int iterator_offset=indexToRow[I];
c(0,iterator_offset)+=old_c(0,I);
print_matrix(c);
}else{
dprintf(("I=%d from basic\n",I));
@ -272,7 +272,7 @@ static int inner_simplex(Mat_<double>& c, Mat_<double>& b,double& v,vector<int>&
}
}
static inline void pivot(Mat_<double>& c,Mat_<double>& b,double& v,vector<int>& N,vector<int>& B,
int leaving_index,int entering_index,vector<unsigned int>& indexToRow){
double Coef=b(leaving_index,entering_index);
for(int i=0;i<b.cols;i++){
@ -307,7 +307,7 @@ static inline void pivot(Mat_<double>& c,Mat_<double>& b,double& v,vector<int>&
}
dprintf(("v was %g\n",v));
v+=Coef*b(leaving_index,b.cols-1);
SWAP(int,N[entering_index],B[leaving_index]);
SWAP(int,indexToRow[N[entering_index]],indexToRow[B[leaving_index]]);
}

View File

@ -321,7 +321,7 @@ void BoostedSoftCascadeOctave::traverse(const CvBoostTree* tree, cv::FileStorage
fs << "}";
delete [] leafs;
}

View File

@ -1,6 +1,6 @@
//--------------------------------------------------------------------------------------------------
// A demo program of the Extremal Region Filter algorithm described in
// Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
//--------------------------------------------------------------------------------------------------
@ -21,7 +21,7 @@ void er_draw(Mat &src, Mat &dst, ERStat& er);
void er_draw(Mat &src, Mat &dst, ERStat& er)
{
if (er.parent != NULL) // deprecate the root region
{
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
@ -29,7 +29,7 @@ void er_draw(Mat &src, Mat &dst, ERStat& er)
}
}
int main(int argc, const char * argv[])
{
@ -54,14 +54,14 @@ int main(int argc, const char * argv[])
}
Mat grey(original.size(),CV_8UC1);
cvtColor(original,grey,COLOR_RGB2GRAY);
double t = (double)getTickCount();
// Build ER tree and filter with the 1st stage default classifier
Ptr<ERFilter> er_filter1 = createERFilterNM1();
er_filter1->run(grey, regions);
t = (double)getTickCount() - t;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << "\t FIRST STAGE CLASSIFIER done in " << t * 1000. / getTickFrequency() << " ms." << endl;
@ -87,11 +87,11 @@ int main(int argc, const char * argv[])
}
t = (double)getTickCount();
// Default second stage classifier
Ptr<ERFilter> er_filter2 = createERFilterNM2();
er_filter2->run(grey, regions);
t = (double)getTickCount() - t;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << "\t SECOND STAGE CLASSIFIER done in " << t * 1000. / getTickFrequency() << " ms." << endl;

View File

@ -17,7 +17,7 @@ static void help(char** argv)
int main(int argc, char** argv)
{
if(argc != 2)
{
help(argv);
return 1;
@ -25,28 +25,28 @@ int main(int argc, char** argv)
string first_file = argv[1];
VideoCapture sequence(first_file);
if (!sequence.isOpened())
{
cerr << "Failed to open the image sequence!\n" << endl;
return 1;
}
Mat image;
namedWindow("Image sequence | press ESC to close", 1);
for(;;)
{
// Read in image from sequence
sequence >> image;
// If no image was retrieved -> end of sequence
if(image.empty())
{
cout << "End of Sequence" << endl;
break;
}
imshow("Image sequence | press ESC to close", image);
if(waitKey(500) == 27)

View File

@ -41,15 +41,15 @@ namespace {
cout << "press space to save a picture. q or esc to quit" << endl;
namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window;
Mat frame;
for (;;) {
capture >> frame;
if (frame.empty())
break;
imshow(window_name, frame);
char key = (char)waitKey(30); //delay N millis, usually long enough to display and capture input
switch (key) {
case 'q':
case 'Q':

View File

@ -119,19 +119,19 @@ if __name__ == '__main__':
img1 = cv2.imread(fn1, 0)
img2 = cv2.imread(fn2, 0)
detector, matcher = init_feature(feature_name)
if img1 is None:
print 'Failed to load fn1:', fn1
sys.exit(1)
if img2 is None:
print 'Failed to load fn2:', fn2
sys.exit(1)
if detector is None:
print 'unknown feature:', feature_name
sys.exit(1)
print 'using', feature_name
pool=ThreadPool(processes = cv2.getNumberOfCPUs())

View File

@ -102,7 +102,7 @@ class App(object):
vis[:] = prob[...,np.newaxis]
try:
cv2.ellipse(vis, track_box, (0, 0, 255), 2)
except:
print track_box
cv2.imshow('camshift', vis)
@ -119,7 +119,7 @@ if __name__ == '__main__':
import sys
try:
video_src = sys.argv[1]
except:
video_src = 0
print __doc__
App(video_src).run()

View File

@ -40,9 +40,9 @@ def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
if __name__ == '__main__':
import sys
try:
fn = sys.argv[1]
except:
fn = '../cpp/baboon.jpg'
src = cv2.imread(fn)

View File

@ -141,7 +141,7 @@ class App:
count = tk.IntVar()
while True:
match_index = text.search(pattern, 'matchPos', count=count, regexp=regexp, stopindex='end')
if not match_index:
break
end_index = text.index( "%s+%sc" % (match_index, count.get()) )
text.mark_set('matchPos', end_index)

View File

@ -143,15 +143,15 @@ if __name__ == '__main__':
if img1 is None:
print 'Failed to load fn1:', fn1
sys.exit(1)
if img2 is None:
print 'Failed to load fn2:', fn2
sys.exit(1)
if detector is None:
print 'unknown feature:', feature_name
sys.exit(1)
print 'using', feature_name
kp1, desc1 = detector.detectAndCompute(img1, None)

View File

@ -51,14 +51,14 @@ if __name__ == '__main__':
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = '../cpp/baboon.jpg'
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
with Timer('running single-threaded'):

View File

@ -61,7 +61,7 @@ if __name__ == '__main__':
print "usage : python hist.py <image_file>"
im = cv2.imread(fname)
if im is None:
print 'Failed to load image file:', fname
sys.exit(1)

View File

@ -2,7 +2,7 @@
'''
This example illustrates how to use cv2.HoughCircles() function.
Usage: ./houghcircles.py [<image_name>]
image argument defaults to ../cpp/board.jpg
'''

View File

@ -23,16 +23,16 @@ if __name__ == '__main__':
import sys
try:
fn = sys.argv[1]
except:
fn = '../cpp/fruits.jpg'
print __doc__
img = cv2.imread(fn)
if img is None:
print 'Failed to load image file:', fn
sys.exit(1)
img_mark = img.copy()
mark = np.zeros(img.shape[:2], np.uint8)
sketch = Sketcher('img', [img_mark, mark], lambda : ((255, 255, 255), 255))

View File

@ -27,13 +27,13 @@ if __name__ == '__main__':
fn = sys.argv[1]
except:
fn = '../cpp/baboon.jpg'
img = cv2.imread(fn)
if img is None:
print 'Failed to load image file:', fn
sys.exit(1)
cv2.imshow('original', img)
modes = cycle(['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient'])