mirror of https://github.com/opencv/opencv.git

Fix spelling typos

commit 659ffaddb4 (parent 89d3f95a8e)
@@ -1078,8 +1078,8 @@ void cvCreateTrainingSamples( const char* filename,
 icvPlaceDistortedSample( sample, inverse, maxintensitydev,
 maxxangle, maxyangle, maxzangle,
 0 /* nonzero means placing image without cut offs */,
-0.0 /* nozero adds random shifting */,
-0.0 /* nozero adds random scaling */,
+0.0 /* nonzero adds random shifting */,
+0.0 /* nonzero adds random scaling */,
 &data );

 if( showsamples )
@@ -45,7 +45,7 @@ protected:
 };
 std::vector<Feature> features;

-cv::Mat normSum; //for nomalization calculation (L1 or L2)
+cv::Mat normSum; //for normalization calculation (L1 or L2)
 std::vector<cv::Mat> hist;
 };

@@ -70,7 +70,7 @@ inline float CvHOGEvaluator::Feature::calc( const std::vector<cv::Mat>& _hists,

 const float *pnormSum = _normSum.ptr<float>((int)y);
 normFactor = (float)(pnormSum[fastRect[0].p0] - pnormSum[fastRect[1].p1] - pnormSum[fastRect[2].p2] + pnormSum[fastRect[3].p3]);
-res = (res > 0.001f) ? ( res / (normFactor + 0.001f) ) : 0.f; //for cutting negative values, which apper due to floating precision
+res = (res > 0.001f) ? ( res / (normFactor + 0.001f) ) : 0.f; //for cutting negative values, which appear due to floating precision

 return res;
 }
@@ -145,7 +145,7 @@ no child, parent is contour-3. So array is [-1,-1,-1,3].
 And this is the final guy, Mr.Perfect. It retrieves all the contours and creates a full family
 hierarchy list. **It even tells, who is the grandpa, father, son, grandson and even beyond... :)**.

-For examle, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the
+For example, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the
 result given by OpenCV and analyze it. Again, red letters give the contour number and green letters
 give the hierarchy order.

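For readers following the tutorial text in this hunk, a minimal sketch (editorial, not part of the commit) of retrieving the full hierarchy with cv.RETR_TREE; the image path and the OpenCV 4.x return signature of cv.findContours are assumptions:
@code{.py}
import cv2 as cv

# 'shapes.png' is a placeholder; any binary image with nested shapes works.
img = cv.imread('shapes.png', cv.IMREAD_GRAYSCALE)
_, thresh = cv.threshold(img, 127, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# hierarchy[0][i] = [next, previous, first_child, parent] for contour i,
# which encodes the grandparent/parent/child chain described above.
print(hierarchy)
@endcode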
@@ -17,7 +17,7 @@ In short, we found locations of some parts of an object in another cluttered ima
 is sufficient to find the object exactly on the trainImage.

 For that, we can use a function from calib3d module, ie **cv.findHomography()**. If we pass the set
-of points from both the images, it will find the perpective transformation of that object. Then we
+of points from both the images, it will find the perspective transformation of that object. Then we
 can use **cv.perspectiveTransform()** to find the object. It needs atleast four correct points to
 find the transformation.

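As context for the tutorial hunk above, a minimal sketch (editorial, not part of the commit) of the cv.findHomography / cv.perspectiveTransform flow; the point sets are synthetic stand-ins for matched keypoint locations:
@code{.py}
import numpy as np
import cv2 as cv

# Synthetic correspondences: at least four points are needed.
src_pts = np.float32([[0, 0], [0, 1], [1, 1], [1, 0], [0.5, 0.5]]).reshape(-1, 1, 2)
dst_pts = np.float32([[10, 10], [10, 110], [110, 110], [110, 10], [60, 60]]).reshape(-1, 1, 2)

M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)  # 3x3 matrix
corners = np.float32([[0, 0], [0, 1], [1, 1], [1, 0]]).reshape(-1, 1, 2)
projected = cv.perspectiveTransform(corners, M)  # object corners in the trainImage
@endcode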
@@ -68,7 +68,7 @@ Now we set a condition that atleast 10 matches (defined by MIN_MATCH_COUNT) are
 find the object. Otherwise simply show a message saying not enough matches are present.

 If enough matches are found, we extract the locations of matched keypoints in both the images. They
-are passed to find the perpective transformation. Once we get this 3x3 transformation matrix, we use
+are passed to find the perspective transformation. Once we get this 3x3 transformation matrix, we use
 it to transform the corners of queryImage to corresponding points in trainImage. Then we draw it.
 @code{.py}
 if len(good)>MIN_MATCH_COUNT:
@@ -28,7 +28,7 @@ If it is a greater than a threshold value, it is considered as a corner. If we p
 ![image](images/shitomasi_space.png)

 From the figure, you can see that only when \f$\lambda_1\f$ and \f$\lambda_2\f$ are above a minimum value,
-\f$\lambda_{min}\f$, it is conidered as a corner(green region).
+\f$\lambda_{min}\f$, it is considered as a corner(green region).

 Code
 ----
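A minimal sketch (editorial, not part of the commit) of the Shi-Tomasi detector this tutorial leads into, using a synthetic image so it runs stand-alone:
@code{.py}
import numpy as np
import cv2 as cv

img = np.zeros((100, 100), np.uint8)
cv.rectangle(img, (30, 30), (70, 70), 255, -1)  # white square -> four corners

# qualityLevel plays the role of the lambda_min threshold described above.
corners = cv.goodFeaturesToTrack(img, maxCorners=10, qualityLevel=0.01, minDistance=10)
print(corners.reshape(-1, 2))
@endcode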
@@ -144,7 +144,7 @@ cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
 ### 7.b. Rotated Rectangle

 Here, bounding rectangle is drawn with minimum area, so it considers the rotation also. The function
-used is **cv.minAreaRect()**. It returns a Box2D structure which contains following detals - (
+used is **cv.minAreaRect()**. It returns a Box2D structure which contains following details - (
 center (x,y), (width, height), angle of rotation ). But to draw this rectangle, we need 4 corners of
 the rectangle. It is obtained by the function **cv.boxPoints()**
 @code{.py}
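A minimal sketch (editorial, not part of the commit) of the cv.minAreaRect / cv.boxPoints pairing described above, on a synthetic point set:
@code{.py}
import numpy as np
import cv2 as cv

pts = np.array([[10, 10], [60, 25], [50, 60], [5, 45]], dtype=np.float32)
rect = cv.minAreaRect(pts)      # Box2D: ((cx, cy), (w, h), angle)
box = cv.boxPoints(rect)        # the 4 corners needed for drawing
box = np.intp(box)
canvas = np.zeros((80, 80, 3), np.uint8)
cv.drawContours(canvas, [box], 0, (0, 255, 0), 2)
@endcode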
@@ -185,7 +185,7 @@ array([[[ 3, -1, 1, -1],
 And this is the final guy, Mr.Perfect. It retrieves all the contours and creates a full family
 hierarchy list. **It even tells, who is the grandpa, father, son, grandson and even beyond... :)**.

-For examle, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the
+For example, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the
 result given by OpenCV and analyze it. Again, red letters give the contour number and green letters
 give the hierarchy order.

@@ -381,7 +381,7 @@ Here is explained in detail the code for the real time application:
 as not, there are false correspondences or also called *outliers*. The [Random Sample
 Consensus](http://en.wikipedia.org/wiki/RANSAC) or *Ransac* is a non-deterministic iterative
 method which estimate parameters of a mathematical model from observed data producing an
-approximate result as the number of iterations increase. After appyling *Ransac* all the *outliers*
+approximate result as the number of iterations increase. After applying *Ransac* all the *outliers*
 will be eliminated to then estimate the camera pose with a certain probability to obtain a good
 solution.

@@ -153,7 +153,7 @@ file name before running the application, e.g.:

 $ GRAPH_DUMP_PATH=segm.dot ./bin/example_tutorial_porting_anisotropic_image_segmentation_gapi

-Now this file can be visalized with a `dot` command like this:
+Now this file can be visualized with a `dot` command like this:

 $ dot segm.dot -Tpng -o segm.png

@@ -368,7 +368,7 @@ visualization like this:

 ![Anisotropic image segmentation graph with OpenCV & Fluid kernels](pics/segm_fluid.gif)

-This graph doesn't differ structually from its previous version (in
+This graph doesn't differ structurally from its previous version (in
 terms of operations and data objects), though a changed layout (on the
 left side of the dump) is easily noticeable.

@@ -427,7 +427,7 @@ the ROI, which will lead to accuracy improvement.
 Unfortunately, another problem occurs if we do that:
 if the rectangular ROI is near the border, a describing square will probably go
 out of the frame --- that leads to errors of the landmarks detector.
-To aviod such a mistake, we have to implement an algorithm that, firstly,
+To avoid such a mistake, we have to implement an algorithm that, firstly,
 describes every rectangle by a square, then counts the farthest coordinates
 turned up to be outside of the frame and, finally, pads the source image by
 borders (e.g. single-colored) with the size counted. It will be safe to take
@@ -145,7 +145,7 @@ description requires three parameters:
 regular "functions" which take and return data. Here network
 `Faces` (a detector) takes a cv::GMat and returns a cv::GMat, while
 network `AgeGender` is known to provide two outputs (age and gender
-blobs, respecitvely) -- so its has a `std::tuple<>` as a return
+blobs, respectively) -- so its has a `std::tuple<>` as a return
 type.
 3. A topology name -- can be any non-empty string, G-API is using
 these names to distinguish networks inside. Names should be unique
@@ -499,7 +499,7 @@ using the following OpenCV methods:
 - the imwrite static method from the Highgui class to write an image to a file
 - the GaussianBlur static method from the Imgproc class to apply to blur the original image

-We're also going to use the Mat class which is returned from the imread method and accpeted as the
+We're also going to use the Mat class which is returned from the imread method and accepted as the
 main argument to both the GaussianBlur and the imwrite methods.

 ### Add an image to the project
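The tutorial in this hunk targets the Java bindings; a minimal Python equivalent (editorial, not part of the commit; file names are placeholders) of the imread -> GaussianBlur -> imwrite flow it describes:
@code{.py}
import cv2 as cv

img = cv.imread('input.png')        # returns a Mat-like numpy array
if img is not None:
    blurred = cv.GaussianBlur(img, (5, 5), 0)
    cv.imwrite('blurred.png', blurred)
@endcode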
@@ -10,7 +10,7 @@ In this tutorial,
 - We will see the basics of face detection and eye detection using the Haar Feature-based Cascade Classifiers
 - We will use the @ref cv::CascadeClassifier class to detect objects in a video stream. Particularly, we
 will use the functions:
-- @ref cv::CascadeClassifier::load to load a .xml classifier file. It can be either a Haar or a LBP classifer
+- @ref cv::CascadeClassifier::load to load a .xml classifier file. It can be either a Haar or a LBP classifier
 - @ref cv::CascadeClassifier::detectMultiScale to perform the detection.

 Theory
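A minimal sketch (editorial, not part of the commit) of the load / detectMultiScale pair listed above; the cascade path relies on the cv.data helper shipped with the Python package, and the image name is a placeholder:
@code{.py}
import cv2 as cv

face_cascade = cv.CascadeClassifier()
ok = face_cascade.load(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
img = cv.imread('people.jpg', cv.IMREAD_GRAYSCALE)
if ok and img is not None:
    faces = face_cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3)
    print(faces)  # one (x, y, w, h) rectangle per detected face
@endcode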
@@ -168,7 +168,7 @@ Command line arguments of opencv_traincascade application grouped by purposes:
 - `-w <sampleWidth>` : Width of training samples (in pixels). Must have exactly the same value as used during training samples creation (opencv_createsamples utility).
 - `-h <sampleHeight>` : Height of training samples (in pixels). Must have exactly the same value as used during training samples creation (opencv_createsamples utility).

-- Boosted classifer parameters:
+- Boosted classifier parameters:
 - `-bt <{DAB, RAB, LB, GAB(default)}>` : Type of boosted classifiers: DAB - Discrete AdaBoost, RAB - Real AdaBoost, LB - LogitBoost, GAB - Gentle AdaBoost.
 - `-minHitRate <min_hit_rate>` : Minimal desired hit rate for each stage of the classifier. Overall hit rate may be estimated as (min_hit_rate ^ number_of_stages), @cite Viola04 §4.1.
 - `-maxFalseAlarmRate <max_false_alarm_rate>` : Maximal desired false alarm rate for each stage of the classifier. Overall false alarm rate may be estimated as (max_false_alarm_rate ^ number_of_stages), @cite Viola04 §4.1.
@@ -43,7 +43,7 @@ VideoCapture can retrieve the following data:
 - CAP_OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3)
 - CAP_OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1)
 - CAP_OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1)
-- CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not ocluded, not shaded etc.)
+- CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not occluded, not shaded etc.)
 (CV_8UC1)

 -# data given from BGR image generator:
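A minimal sketch (editorial, not part of the commit) of pulling those streams from Python; it assumes an OpenNI-enabled OpenCV build with a depth device attached:
@code{.py}
import cv2 as cv

cap = cv.VideoCapture(cv.CAP_OPENNI2)
if cap.grab():
    ok, depth = cap.retrieve(flag=cv.CAP_OPENNI_DEPTH_MAP)         # depth map
    ok2, mask = cap.retrieve(flag=cv.CAP_OPENNI_VALID_DEPTH_MASK)  # CV_8UC1 mask
@endcode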
@@ -1321,7 +1321,7 @@ struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters
 GridType gridType;

 CV_PROP_RW float squareSize; //!< Distance between two adjacent points. Used by CALIB_CB_CLUSTERING.
-CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from predicion. Used by CALIB_CB_CLUSTERING.
+CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from prediction. Used by CALIB_CB_CLUSTERING.
 };

 #ifndef DISABLE_OPENCV_3_COMPATIBILITY
@@ -48,7 +48,7 @@
 #include <iterator>

 /*
-This is stright-forward port v3 of Matlab calibration engine by Jean-Yves Bouguet
+This is straight-forward port v3 of Matlab calibration engine by Jean-Yves Bouguet
 that is (in a large extent) based on the paper:
 Z. Zhang. "A flexible new technique for camera calibration".
 IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.
@@ -2474,7 +2474,7 @@ int Chessboard::Board::validateCorners(const cv::Mat &data,cv::flann::Index &fla
 std::vector<cv::Point2f>::const_iterator iter1 = points.begin();
 for(;iter1 != points.end();++iter1)
 {
-// we do not have to check for NaN because of getCorners(flase)
+// we do not have to check for NaN because of getCorners(false)
 std::vector<cv::Point2f>::const_iterator iter2 = iter1+1;
 for(;iter2 != points.end();++iter2)
 if(*iter1 == *iter2)
@@ -3007,7 +3007,7 @@ Chessboard::Board Chessboard::detectImpl(const Mat& gray,std::vector<cv::Mat> &f
 if(keypoints_seed.empty())
 return Chessboard::Board();

-// check how many points are likely a checkerbord corner
+// check how many points are likely a checkerboard corner
 float response = fabs(keypoints_seed.front().response*MIN_RESPONSE_RATIO);
 std::vector<KeyPoint>::const_iterator seed_iter = keypoints_seed.begin();
 int count = 0;
@@ -650,7 +650,7 @@ class Chessboard: public cv::Feature2D
 bool top(bool check_empty=false); // moves one corner to the top or returns false
 bool checkCorner()const; // returns true if the current corner belongs to at least one
 // none empty cell
-bool isNaN()const; // returns true if the currnet corner is NaN
+bool isNaN()const; // returns true if the current corner is NaN

 const cv::Point2f* operator*() const; // current corner coordinate
 cv::Point2f* operator*(); // current corner coordinate
@@ -94,7 +94,7 @@ void CV_ChessboardDetectorBadArgTest::run( int /*start_from */)
 img = cb.clone();
 initArgs();
 pattern_size = Size(2,2);
-errors += run_test_case( Error::StsOutOfRange, "Invlid pattern size" );
+errors += run_test_case( Error::StsOutOfRange, "Invalid pattern size" );
 pattern_size = cbg.cornersSize();

 cb.convertTo(img, CV_32F);
@@ -1309,7 +1309,7 @@ CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order,
 const CvArr* delta CV_DEFAULT(NULL),
 double scale CV_DEFAULT(1.) );

-/** Tranposes matrix. Square matrices can be transposed in-place */
+/** Transposes matrix. Square matrices can be transposed in-place */
 CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst );
 #define cvT cvTranspose

@@ -569,7 +569,7 @@ inline v_int64x4 v256_blend(const v_int64x4& a, const v_int64x4& b)
 { return v_int64x4(v256_blend<m>(v_uint64x4(a.val), v_uint64x4(b.val)).val); }

 // shuffle
-// todo: emluate 64bit
+// todo: emulate 64bit
 #define OPENCV_HAL_IMPL_AVX_SHUFFLE(_Tpvec, intrin) \
 template<int m> \
 inline _Tpvec v256_shuffle(const _Tpvec& a) \
@@ -73,7 +73,7 @@ implemented as a structure based on a one SIMD register.

 - cv::v_uint8x16 and cv::v_int8x16: sixteen 8-bit integer values (unsigned/signed) - char
 - cv::v_uint16x8 and cv::v_int16x8: eight 16-bit integer values (unsigned/signed) - short
-- cv::v_uint32x4 and cv::v_int32x4: four 32-bit integer values (unsgined/signed) - int
+- cv::v_uint32x4 and cv::v_int32x4: four 32-bit integer values (unsigned/signed) - int
 - cv::v_uint64x2 and cv::v_int64x2: two 64-bit integer values (unsigned/signed) - int64
 - cv::v_float32x4: four 32-bit floating point values (signed) - float
 - cv::v_float64x2: two 64-bit floating point values (signed) - double
@@ -1805,7 +1805,7 @@ inline v_float32x4 v_broadcast_element(const v_float32x4& a)
 return v_setall_f32(v_extract_n<i>(a));
 }

-////// FP16 suport ///////
+////// FP16 support ///////
 #if CV_FP16
 inline v_float32x4 v_load_expand(const float16_t* ptr)
 {
@@ -94,7 +94,7 @@ struct v_uint16x8
 }
 ushort get0() const
 {
-return (ushort)wasm_i16x8_extract_lane(val, 0); // wasm_u16x8_extract_lane() unimplemeted yet
+return (ushort)wasm_i16x8_extract_lane(val, 0); // wasm_u16x8_extract_lane() unimplemented yet
 }

 v128_t val;
@@ -50,7 +50,7 @@ typedef double v1f64 __attribute__ ((vector_size(8), aligned(8)));
 #define msa_ld1q_f32(__a) ((v4f32)__builtin_msa_ld_w(__a, 0))
 #define msa_ld1q_f64(__a) ((v2f64)__builtin_msa_ld_d(__a, 0))

-/* Store 64bits vector elments values to the given memory address. */
+/* Store 64bits vector elements values to the given memory address. */
 #define msa_st1_s8(__a, __b) (*((v8i8*)(__a)) = __b)
 #define msa_st1_s16(__a, __b) (*((v4i16*)(__a)) = __b)
 #define msa_st1_s32(__a, __b) (*((v2i32*)(__a)) = __b)
@@ -377,7 +377,7 @@ typedef double v1f64 __attribute__ ((vector_size(8), aligned(8)));
 })

 /* Right shift elements in a 128 bits vector by an immediate value, saturate the results and them in a 64 bits vector.
-Input is signed and outpus is unsigned. */
+Input is signed and output is unsigned. */
 #define msa_qrshrun_n_s16(__a, __b) \
 ({ \
 v8i16 __d = __builtin_msa_srlri_h(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a)), (int)(__b)); \
@@ -62,7 +62,7 @@ static String getDeviceTypeString(const cv::ocl::Device& device)
 }
 }

-return "unkown";
+return "unknown";
 }
 } // namespace

@@ -165,7 +165,7 @@ public:

 /** @brief Sets the initial step that will be used in downhill simplex algorithm.

-Step, together with initial point (givin in DownhillSolver::minimize) are two `n`-dimensional
+Step, together with initial point (given in DownhillSolver::minimize) are two `n`-dimensional
 vectors that are used to determine the shape of initial simplex. Roughly said, initial point
 determines the position of a simplex (it will become simplex's centroid), while step determines the
 spread (size in each dimension) of a simplex. To be more precise, if \f$s,x_0\in\mathbb{R}^n\f$ are
@@ -317,7 +317,7 @@ VSX_IMPL_1RG(vec_udword2, wi, vec_float4, wf, xvcvspuxds, vec_ctulo)
 * Also there's already an open bug https://bugs.llvm.org/show_bug.cgi?id=31837
 *
 * So we're not able to use inline asm and only use built-in functions that CLANG supports
-* and use __builtin_convertvector if clang missng any of vector conversions built-in functions
+* and use __builtin_convertvector if clang missing any of vector conversions built-in functions
 *
 * todo: clang asm template bug is fixed, need to reconsider the current workarounds.
 */
@@ -491,7 +491,7 @@ VSX_IMPL_CONV_EVEN_2_4(vec_uint4, vec_double2, vec_ctu, vec_ctuo)
 // Only for Eigen!
 /*
 * changing behavior of conversion intrinsics for gcc has effect on Eigen
-* so we redfine old behavior again only on gcc, clang
+* so we redefine old behavior again only on gcc, clang
 */
 #if !defined(__clang__) || __clang_major__ > 4
 // ignoring second arg since Eigen only truncates toward zero
@@ -250,7 +250,7 @@ cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes,
 for( int i = dims - 1; i >= 0; i-- )
 {
 if( sizes[i] < 0 )
-CV_Error( CV_StsBadSize, "one of dimesion sizes is non-positive" );
+CV_Error( CV_StsBadSize, "one of dimension sizes is non-positive" );
 mat->dim[i].size = sizes[i];
 if( step > INT_MAX )
 CV_Error( CV_StsOutOfRange, "The array is too big" );
@@ -545,7 +545,7 @@ cvCreateSparseMat( int dims, const int* sizes, int type )
 for( i = 0; i < dims; i++ )
 {
 if( sizes[i] <= 0 )
-CV_Error( CV_StsBadSize, "one of dimesion sizes is non-positive" );
+CV_Error( CV_StsBadSize, "one of dimension sizes is non-positive" );
 }

 CvSparseMat* arr = (CvSparseMat*)cvAlloc(sizeof(*arr)+MAX(0,dims-CV_MAX_DIM)*sizeof(arr->size[0]));
@@ -53,7 +53,7 @@ cvtabs_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep,
 }
 }

-// variant for convrsions 16f <-> ... w/o unrolling
+// variant for conversions 16f <-> ... w/o unrolling
 template<typename _Ts, typename _Td> inline void
 cvtabs1_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep,
 Size size, float a, float b )
@@ -123,7 +123,7 @@ cvt_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep,
 }
 }

-// variant for convrsions 16f <-> ... w/o unrolling
+// variant for conversions 16f <-> ... w/o unrolling
 template<typename _Ts, typename _Td> inline void
 cvt1_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep,
 Size size, float a, float b )
@@ -77,7 +77,7 @@ Replaced y(1,ndim,0.0) ------> y(1,ndim+1,0.0)

 ***********************************************************************************************************************************

-The code below was used in tesing the source code.
+The code below was used in testing the source code.
 Created by @SareeAlnaghy

 #include <iostream>
@@ -1519,7 +1519,7 @@ public:
 {
 TlsAbstraction* tls = getTlsAbstraction();
 if (NULL == tls)
-return; // TLS signleton is not available (terminated)
+return; // TLS singleton is not available (terminated)
 ThreadData *pTD = tlsValue == NULL ? (ThreadData*)tls->getData() : (ThreadData*)tlsValue;
 if (pTD == NULL)
 return; // no OpenCV TLS data for this thread
@@ -1610,7 +1610,7 @@ public:

 TlsAbstraction* tls = getTlsAbstraction();
 if (NULL == tls)
-return NULL; // TLS signleton is not available (terminated)
+return NULL; // TLS singleton is not available (terminated)

 ThreadData* threadData = (ThreadData*)tls->getData();
 if(threadData && threadData->slots.size() > slotIdx)
@@ -1646,7 +1646,7 @@ public:

 TlsAbstraction* tls = getTlsAbstraction();
 if (NULL == tls)
-return; // TLS signleton is not available (terminated)
+return; // TLS singleton is not available (terminated)

 ThreadData* threadData = (ThreadData*)tls->getData();
 if(!threadData)
@@ -134,7 +134,7 @@ CV__DNN_INLINE_NS_BEGIN
 virtual void setOutShape(const MatShape &outTailShape = MatShape()) = 0;

 /** @deprecated Use flag `produce_cell_output` in LayerParams.
-* @brief Specifies either interpret first dimension of input blob as timestamp dimenion either as sample.
+* @brief Specifies either interpret first dimension of input blob as timestamp dimension either as sample.
 *
 * If flag is set to true then shape of input blob will be interpreted as [`T`, `N`, `[data dims]`] where `T` specifies number of timestamps, `N` is number of independent streams.
 * In this case each forward() call will iterate through `T` timestamps and update layer's state `T` times.
@@ -84,7 +84,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
 * Reasoning:
 * ----------
 * Suppose an item's indices in the output tensor is [o1, o2, ...]. The indices in the input
-* tensor will be [o1 + off1, o2 + off2, ...]. The rest of the elements in the input are igored.
+* tensor will be [o1 + off1, o2 + off2, ...]. The rest of the elements in the input are ignored.
 *
 * If the size of the first axis of the input and output tensor is unity, the input and output indices
 * for all the elements will be of the form be [0, o2 + off2, ...] and [0, o2, ...] respectively. Note that
@@ -227,7 +227,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cu
 if (std::is_same<T, half>::value)
 CUDA4DNN_CHECK_CUDNN(cudnnSetConvolutionMathType(descriptor, CUDNN_TENSOR_OP_MATH));
 } catch (...) {
-/* cudnnDestroyConvolutionDescriptor will not fail for a valid desriptor object */
+/* cudnnDestroyConvolutionDescriptor will not fail for a valid descriptor object */
 CUDA4DNN_CHECK_CUDNN(cudnnDestroyConvolutionDescriptor(descriptor));
 throw;
 }
@@ -266,7 +266,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {

 /** page-locks \p size_in_bytes bytes of memory starting from \p ptr_
 *
-* Pre-conditons:
+* Pre-conditions:
 * - host memory should be unregistered
 */
 MemoryLockGuard(void* ptr_, std::size_t size_in_bytes) {
@@ -33,7 +33,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
 *
 * A `DevicePtr<T>` can implicitly convert to `DevicePtr<const T>`.
 *
-* Specalizations:
+* Specializations:
 * - DevicePtr<void>/DevicePtr<const void> do not support pointer arithmetic (but relational operators are provided)
 * - any device pointer pointing to mutable memory is implicitly convertible to DevicePtr<void>
 * - any device pointer is implicitly convertible to DevicePtr<const void>
@@ -67,7 +67,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
 */
 template <class T>
 class Tensor {
-static_assert(std::is_standard_layout<T>::value, "T must staisfy StandardLayoutType");
+static_assert(std::is_standard_layout<T>::value, "T must satisfy StandardLayoutType");

 public:
 using value_type = typename ManagedPtr<T>::element_type;
|
||||
* - [start, end) represents a forward range containing the length of the axes in order
|
||||
* - the number of axis lengths must be less than or equal to the rank
|
||||
* - at most one axis length is allowed for length deduction
|
||||
* - the lengths provided must ensure that the total number of elements remains unchnged
|
||||
* - the lengths provided must ensure that the total number of elements remains unchanged
|
||||
*
|
||||
* Exception Guarantee: Strong
|
||||
*/
|
||||
@ -898,7 +898,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
|
||||
* - [start, end) represents a forward range containing length of the axes in order starting from axis zero
|
||||
* - the number of axis lengths must be less than or equal to the tensor rank
|
||||
* - at most one axis length is allowed for length deduction
|
||||
* - the lengths provided must ensure that the total number of elements remains unchnged
|
||||
* - the lengths provided must ensure that the total number of elements remains unchanged
|
||||
*
|
||||
* Exception Guarantee: Strong
|
||||
*/
|
||||
|
@ -35,7 +35,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
|
||||
* Pre-conditions:
|
||||
* - \p dest and \p src must have the same shape
|
||||
*
|
||||
* Exception Gaurantee: Basic
|
||||
* Exception Guarantee: Basic
|
||||
*/
|
||||
template <class T> inline
|
||||
void copy(const Stream& stream, TensorSpan<T> dest, TensorView<T> src) {
|
||||
@@ -50,7 +50,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
 * - \p A and \p B must meet the mathematical requirements for matrix multiplication
 * - \p result must be large enough to hold the result
 *
-* Exception Gaurantee: Basic
+* Exception Guarantee: Basic
 */
 template <class T> inline
 void gemm(const cublas::Handle& handle, T beta, TensorSpan<T> result, T alpha, bool transa, TensorView<T> A, bool transb, TensorView<T> B) {
@@ -108,7 +108,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
 * Pre-conditions:
 * - \p A and \p result must be compatible tensors
 *
-* Exception Gaurantee: Basic
+* Exception Guarantee: Basic
 */
 template <class T> inline
 void softmax(const cudnn::Handle& handle, TensorSpan<T> output, TensorView<T> input, int channel_axis, bool log) {
@@ -103,7 +103,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
 CV_Assert(pooling_order == pads_end.size());

 /* cuDNN rounds down by default; hence, if ceilMode is false, we do nothing
-* otherwise, we add extra padding towards the end so that the convolution arithmetic yeilds
+* otherwise, we add extra padding towards the end so that the convolution arithmetic yields
 * the correct output size without having to deal with fancy fractional sizes
 */
 auto pads_end_modified = pads_end;
@@ -622,7 +622,7 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
 try {
 wrapper->outProms[processedOutputs].setException(std::current_exception());
 } catch(...) {
-CV_LOG_ERROR(NULL, "DNN: Exception occured during async inference exception propagation");
+CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
 }
 }
 }
@@ -635,7 +635,7 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
 try {
 wrapper->outProms[processedOutputs].setException(e);
 } catch(...) {
-CV_LOG_ERROR(NULL, "DNN: Exception occured during async inference exception propagation");
+CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
 }
 }
 }
@@ -116,7 +116,7 @@ message AttributeProto {

 // The type field MUST be present for this version of the IR.
 // For 0.0.1 versions of the IR, this field was not defined, and
-// implementations needed to use has_field hueristics to determine
+// implementations needed to use has_field heuristics to determine
 // which value field was in use. For IR_VERSION 0.0.2 or later, this
 // field MUST be set and match the f|i|s|t|... field in use. This
 // change was made to accommodate proto3 implementations.
@@ -323,7 +323,7 @@ message TensorProto {
 // For float and complex64 values
 // Complex64 tensors are encoded as a single array of floats,
 // with the real components appearing in odd numbered positions,
-// and the corresponding imaginary component apparing in the
+// and the corresponding imaginary component appearing in the
 // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 // is encoded as [1.0, 2.0 ,3.0 ,4.0]
 // When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
@@ -373,7 +373,7 @@ message TensorProto {
 // For double
 // Complex64 tensors are encoded as a single array of doubles,
 // with the real components appearing in odd numbered positions,
-// and the corresponding imaginary component apparing in the
+// and the corresponding imaginary component appearing in the
 // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 // is encoded as [1.0, 2.0 ,3.0 ,4.0]
 // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
@@ -350,7 +350,7 @@ namespace cv { namespace dnn {

 private:
 /* The same tensor memory can be reused by different layers whenever possible.
-* Hence, it is possible for different backend warppers to point to the same memory.
+* Hence, it is possible for different backend wrappers to point to the same memory.
 * However, it may use only a part of that memory and have a different shape.
 *
 * We store the common information such as device tensor and its corresponding host memory in
@@ -243,7 +243,7 @@ Context::Context()
 queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
 queueCreateInfo.queueFamilyIndex = kQueueFamilyIndex;
 queueCreateInfo.queueCount = 1; // create one queue in this family. We don't need more.
-float queuePriorities = 1.0; // we only have one queue, so this is not that imporant.
+float queuePriorities = 1.0; // we only have one queue, so this is not that important.
 queueCreateInfo.pQueuePriorities = &queuePriorities;

 VkDeviceCreateInfo deviceCreateInfo = {};
@@ -398,7 +398,7 @@ code which is distributed under GPL.
 class CV_EXPORTS_W MSER : public Feature2D
 {
 public:
-/** @brief Full consturctor for %MSER detector
+/** @brief Full constructor for %MSER detector

 @param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$
 @param _min_area prune the area which smaller than minArea
@@ -36,7 +36,7 @@ void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int
 // Nonlinear diffusion filtering scalar step
 void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);

-// For non-maxima suppresion
+// For non-maxima suppression
 bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);

 // Image downsampling
@@ -983,7 +983,7 @@ extractMSER_8uC3( const Mat& src,
 double s = (double)(lr->size-lr->sizei)/(lr->dt-lr->di);
 if ( s < lr->s )
 {
-// skip the first one and check stablity
+// skip the first one and check stability
 if ( i > lr->reinit+1 && MSCRStableCheck( lr, params ) )
 {
 if ( lr->tmsr == NULL )
@@ -131,7 +131,7 @@ float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
 }

 if (val_r<vals[0]) {
-// value is smaller than smalest in simplex
+// value is smaller than smallest in simplex

 // expand some more to see if it drops further
 for (int i=0; i<n; ++i) {
@@ -95,7 +95,7 @@ Internally, cv::GComputation::apply() compiles the captured graph for
 the given input parameters and executes the compiled graph on data
 immediately.

-There is a number important concepts can be outlines with this examle:
+There is a number important concepts can be outlines with this example:
 * Graph declaration and graph execution are distinct steps;
 * Graph is built implicitly from a sequence of G-API expressions;
 * G-API supports function-like calls -- e.g. cv::gapi::resize(), and
@@ -36,7 +36,7 @@ software optimization due to diffent costs of memory access on modern
 computer architectures -- the more data is reused in the first level
 cache, the more efficient pipeline is.

-Definitely the aforementioned techinques can be applied manually --
+Definitely the aforementioned techniques can be applied manually --
 but it requires extra skills and knowledge of the target platform and
 the algorithm implementation changes irrevocably -- becoming more
 specific, less flexible, and harder to extend and maintain.
@@ -242,7 +242,7 @@ Graph *protocol* defines what arguments a computation was defined on
 - A type name (every operation is a C++ type);
 - Operation signature (similar to ~std::function<>~);
 - Operation identifier (a string);
-- Metadata callback -- desribe what is the output value format(s),
+- Metadata callback -- describe what is the output value format(s),
 given the input and arguments.
 - Use ~OpType::on(...)~ to use a new kernel ~OpType~ to construct graphs.
 #+LaTeX: {\footnotesize
@@ -124,7 +124,7 @@ protected:
 F m_f;
 };

-// FIXME: This is an ugly ad-hoc imlpementation. TODO: refactor
+// FIXME: This is an ugly ad-hoc implementation. TODO: refactor

 namespace detail
 {
@@ -39,7 +39,7 @@ namespace fluid
 */
 GAPI_EXPORTS cv::gapi::GBackend backend();
 /** @} */
-} // namespace flud
+} // namespace fluid
 } // namespace gapi

@@ -148,7 +148,7 @@ public:
 * @param outs vector of output cv::Mat objects to produce by the
 * computation.
 *
-* Numbers of elements in ins/outs vectos must match numbers of
+* Numbers of elements in ins/outs vectors must match numbers of
 * inputs/outputs which were used to define the source GComputation.
 */
 void operator() (const std::vector<cv::Mat> &ins, // Compatibility overload
@@ -314,7 +314,7 @@ public:
 * @param args compilation arguments for underlying compilation
 * process.
 *
-* Numbers of elements in ins/outs vectos must match numbers of
+* Numbers of elements in ins/outs vectors must match numbers of
 * inputs/outputs which were used to define this GComputation.
 */
 void apply(const std::vector<cv::Mat>& ins, // Compatibility overload
@@ -373,7 +373,7 @@ public:
 // template<typename... Ts>
 // GCompiled compile(const Ts&... metas, GCompileArgs &&args)
 //
-// But not all compilers can hande this (and seems they shouldn't be able to).
+// But not all compilers can handle this (and seems they shouldn't be able to).
 // FIXME: SFINAE looks ugly in the generated documentation
 /**
 * @overload
@@ -101,7 +101,7 @@ namespace detail
 template<> struct GTypeOf<cv::gapi::own::Scalar> { using type = cv::GScalar; };
 template<typename U> struct GTypeOf<std::vector<U> > { using type = cv::GArray<U>; };
 // FIXME: This is not quite correct since IStreamSource may produce not only Mat but also Scalar
-// and vector data. TODO: Extend the type dispatchig on these types too.
+// and vector data. TODO: Extend the type dispatching on these types too.
 template<> struct GTypeOf<cv::gapi::wip::IStreamSource::Ptr> { using type = cv::GMat;};
 template<class T> using g_type_of_t = typename GTypeOf<T>::type;

@@ -94,7 +94,7 @@ protected:
 F m_f;
 };

-// FIXME: This is an ugly ad-hoc imlpementation. TODO: refactor
+// FIXME: This is an ugly ad-hoc implementation. TODO: refactor

 namespace detail
 {
@@ -35,7 +35,7 @@ namespace wip {
 * This class implements IStreamSource interface.
 * Its constructor takes the same parameters as cv::VideoCapture does.
 *
-* Please make sure that videoio OpenCV module is avaiable before using
+* Please make sure that videoio OpenCV module is available before using
 * this in your application (G-API doesn't depend on it directly).
 *
 * @note stream sources are passed to G-API via shared pointers, so
@@ -7,7 +7,7 @@
 int main(int argc, char *argv[])
 {
 if (argc < 2) {
-std::cerr << "Filename requried" << std::endl;
+std::cerr << "Filename required" << std::endl;
 return 1;
 }

@@ -61,13 +61,13 @@ cv::Size cv::gapi::wip::draw::FTTextRender::Priv::getTextSize(const std::wstring
 // or decrement (for right-to-left writing) the pen position after a
 // glyph has been rendered when processing text
 //
-// widht (bitmap->width) - The width of glyph
+// width (bitmap->width) - The width of glyph
 //
 //
-// Algorihm to compute size of the text bounding box:
+// Algorithm to compute size of the text bounding box:
 //
 // 1) Go through all symbols and shift pen position and save glyph parameters (left, advance, width)
-// If left + pen postion < 0 set left to 0. For example it's maybe happened
+// If left + pen position < 0 set left to 0. For example it's maybe happened
 // if we print first letter 'J' or any other letter with negative 'left'
 // We want to render glyph in pen position + left, so we must't allow it to be negative
 //
@@ -184,7 +184,7 @@ void drawPrimitivesOCV(cv::Mat& in,
 cv::Point org(0, mask.rows - baseline);
 cv::putText(mask, tp.text, org, tp.ff, tp.fs, 255, tp.thick);

-// Org is bottom left point, trasform it to top left point for blendImage
+// Org is bottom left point, transform it to top left point for blendImage
 cv::Point tl(tp.org.x, tp.org.y - mask.size().height + baseline);

 blendTextMask(in, mask, tl, tp.color);
@@ -208,7 +208,7 @@ void drawPrimitivesOCV(cv::Mat& in,
 cv::Point org(0, mask.rows - baseline);
 ftpr->putText(mask, ftp.text, org, ftp.fh);

-// Org is bottom left point, trasform it to top left point for blendImage
+// Org is bottom left point, transform it to top left point for blendImage
 cv::Point tl(ftp.org.x, ftp.org.y - mask.size().height + baseline);

 blendTextMask(in, mask, tl, color);
@@ -1823,7 +1823,7 @@ GAPI_FLUID_KERNEL(GFluidBayerGR2RGB, cv::gapi::imgproc::GBayerGR2RGB, false)
 }
 };

-} // namespace fliud
+} // namespace fluid
 } // namespace gapi
 } // namespace cv

@@ -209,7 +209,7 @@ RUN_MEDBLUR3X3_IMPL( float)

 #undef RUN_MEDBLUR3X3_IMPL

-} // namespace fliud
+} // namespace fluid
 } // namespace gapi
 } // namespace cv

@@ -25,7 +25,7 @@ using cv::gapi::own::rintd;

 //--------------------------------
 //
-// Macros for mappig of data types
+// Macros for mapping of data types
 //
 //--------------------------------

@@ -185,7 +185,7 @@ struct IEUnit {
 // The practice shows that not all inputs and not all outputs
 // are mandatory to specify in IE model.
 // So what we're concerned here about is:
-// if opeation's (not topology's) input/output number is
+// if operation's (not topology's) input/output number is
 // greater than 1, then we do care about input/output layer
 // names. Otherwise, names are picked up automatically.
 // TODO: Probably this check could be done at the API entry point? (gnet)
@@ -15,7 +15,7 @@

 namespace cv { namespace gimpl {

-// NB: This is what a "Kernel Package" from the origianl Wiki doc should be.
+// NB: This is what a "Kernel Package" from the original Wiki doc should be.
 void loadOCLImgProc(std::map<std::string, cv::GOCLKernel> &kmap);

 }}
@@ -32,7 +32,7 @@ namespace
 //
 // In this case, Data object is part of Island A if and only if:
 // - Data object's producer is part of Island A,
-// - AND any of Data obejct's consumers is part of Island A.
+// - AND any of Data object's consumers is part of Island A.
 //
 // Op["island0"] --> Data[ ? ] --> Op["island0"]
 // :
@@ -147,7 +147,7 @@ void cv::gimpl::passes::checkIslands(ade::passes::PassContext &ctx)

 // Run the recursive traversal process as described in 5/a-d.
 // This process is like a flood-fill traversal for island.
-// If there's to distint successful flood-fills happened for the same island
+// If there's to distinct successful flood-fills happened for the same island
 // name, there are two islands with this name.
 std::stack<ade::NodeHandle> stack;
 stack.push(tagged_nh);
@@ -198,7 +198,7 @@ void sync_data(cv::GRunArgs &results, cv::GRunArgsP &outputs)
 // "Stop" is received.
 //
 // Queue reader is the class which encapsulates all this logic and
-// provies threads with a managed storage and an easy API to obtain
+// provides threads with a managed storage and an easy API to obtain
 // data.
 class QueueReader
 {
@@ -67,7 +67,7 @@ inline std::ostream& operator<<(std::ostream& os, bitwiseOp op)
 // initMatsRandU - function that is used to initialize input/output data
 // FIXTURE_API(mathOp,bool,double,bool) - test-specific parameters (types)
 // 4 - number of test-specific parameters
-// opType, testWithScalar, scale, doReverseOp - test-spcific parameters (names)
+// opType, testWithScalar, scale, doReverseOp - test-specific parameters (names)
 //
 // We get:
 // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function
@@ -294,7 +294,7 @@ TEST_P(Polar2CartTest, AccuracyTest)
 // expect of single-precision elementary functions implementation.
 //
 // However, good idea is making such threshold configurable: parameter
-// of this test - which a specific test istantiation could setup.
+// of this test - which a specific test instantiation could setup.
 //
 // Note that test instantiation for the OpenCV back-end could even let
 // the threshold equal to zero, as CV back-end calls the same kernel.
@@ -340,7 +340,7 @@ TEST_P(Cart2PolarTest, AccuracyTest)
 // expect of single-precision elementary functions implementation.
 //
 // However, good idea is making such threshold configurable: parameter
-// of this test - which a specific test istantiation could setup.
+// of this test - which a specific test instantiation could setup.
 //
 // Note that test instantiation for the OpenCV back-end could even let
 // the threshold equal to zero, as CV back-end calls the same kernel.
@@ -19,7 +19,7 @@ namespace opencv_test
 // initMatrixRandN - function that is used to initialize input/output data
 // FIXTURE_API(CompareMats,int,int) - test-specific parameters (types)
 // 3 - number of test-specific parameters
-// cmpF, kernSize, borderType - test-spcific parameters (names)
+// cmpF, kernSize, borderType - test-specific parameters (names)
 //
 // We get:
 // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function
@@ -426,7 +426,7 @@ struct output_args_lifetime : ::testing::Test{
 static constexpr const int num_of_requests = 20;
 };
 TYPED_TEST_CASE_P(output_args_lifetime);
-//There are intentionaly no actual checks (asserts and verify) in output_args_lifetime tests.
+//There are intentionally no actual checks (asserts and verify) in output_args_lifetime tests.
 //They are more of example use-cases than real tests. (ASAN/valgrind can still catch issues here)
 TYPED_TEST_P(output_args_lifetime, callback){

@@ -64,7 +64,7 @@ TEST(GAPI, Mat_Recreate)
 EXPECT_EQ(m3.at<uchar>(0, 0), m4.at<uchar>(0, 0));

 // cv::Mat::create must be NOOP if we don't change the meta,
-// even if the origianl mat is created from handle.
+// even if the original mat is created from handle.
 m4.create(3, 3, CV_8U);
 EXPECT_EQ(m3.rows, m4.rows);
 EXPECT_EQ(m3.cols, m4.cols);
@@ -1151,7 +1151,7 @@ CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype );
 /** @brief Returns the polygon points which make up the given ellipse.

 The ellipse is define by the box of size 'axes' rotated 'angle' around the 'center'. A partial
-sweep of the ellipse arc can be done by spcifying arc_start and arc_end to be something other than
+sweep of the ellipse arc can be done by specifying arc_start and arc_end to be something other than
 0 and 360, respectively. The input array 'pts' must be large enough to hold the result. The total
 number of points stored into 'pts' is returned by this function.
 @see cv::ellipse2Poly
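The C++ counterpart referenced by the @see above is exposed to Python as cv.ellipse2Poly; a minimal sketch (editorial, not part of the commit) of a full sweep versus the partial arc described in this hunk:
@code{.py}
import cv2 as cv

full = cv.ellipse2Poly((50, 50), (40, 20), 0, 0, 360, 5)  # whole ellipse
arc = cv.ellipse2Poly((50, 50), (40, 20), 0, 45, 135, 5)  # partial sweep
print(len(full), len(arc))
@endcode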
@@ -630,7 +630,7 @@ approxPolyDP_( const Point_<T>* src_contour, int count0, Point_<T>* dst_contour,
 WRITE_PT( src_contour[count-1] );

 // last stage: do final clean-up of the approximated contour -
-// remove extra points on the [almost] stright lines.
+// remove extra points on the [almost] straight lines.
 is_closed = is_closed0;
 count = new_count;
 pos = is_closed ? count - 1 : 0;
@@ -776,7 +776,7 @@ cv::RotatedRect cv::fitEllipseDirect( InputArray _points )
 namespace cv
 {

-// Calculates bounding rectagnle of a point set or retrieves already calculated
+// Calculates bounding rectangle of a point set or retrieves already calculated
 static Rect pointSetBoundingRect( const Mat& points )
 {
 int npoints = points.checkVector(2);
@@ -1392,7 +1392,7 @@ cvFitEllipse2( const CvArr* array )
 return cvBox2D(cv::fitEllipse(points));
 }

-/* Calculates bounding rectagnle of a point set or retrieves already calculated */
+/* Calculates bounding rectangle of a point set or retrieves already calculated */
 CV_IMPL CvRect
 cvBoundingRect( CvArr* array, int update )
 {
@@ -325,7 +325,7 @@ void CV_ApproxPolyTest::run( int /*start_from*/ )
 if( DstSeq == NULL )
 {
 ts->printf( cvtest::TS::LOG,
-"cvApproxPoly returned NULL for contour #%d, espilon = %g\n", i, Eps );
+"cvApproxPoly returned NULL for contour #%d, epsilon = %g\n", i, Eps );
 code = cvtest::TS::FAIL_INVALID_OUTPUT;
 goto _exit_;
 } // if( DstSeq == NULL )
@@ -60,7 +60,7 @@ namespace opencv_test { namespace {
 // 6 - partial intersection, rectangle on top of different size
 // 7 - full intersection, rectangle fully enclosed in the other
 // 8 - partial intersection, rectangle corner just touching. point contact
-// 9 - partial intersetion. rectangle side by side, line contact
+// 9 - partial intersection. rectangle side by side, line contact

 static void compare(const std::vector<Point2f>& test, const std::vector<Point2f>& target)
 {
@@ -44,7 +44,7 @@ foreach(file ${seed_project_files_rel})
 endforeach()

 list(APPEND depends gen_opencv_java_source "${OPENCV_DEPHELPER}/gen_opencv_java_source")
-ocv_copyfiles_add_target(${the_module}_android_source_copy JAVA_SRC_COPY "Copy Java(Andoid SDK) source files" ${depends})
+ocv_copyfiles_add_target(${the_module}_android_source_copy JAVA_SRC_COPY "Copy Java(Android SDK) source files" ${depends})
 file(REMOVE "${OPENCV_DEPHELPER}/${the_module}_android_source_copy") # force rebuild after CMake run

 set(depends ${the_module}_android_source_copy "${OPENCV_DEPHELPER}/${the_module}_android_source_copy")
@@ -134,7 +134,7 @@ foreach(file ${__files_rel})
 endforeach()

 list(APPEND depends gen_opencv_java_source "${OPENCV_DEPHELPER}/gen_opencv_java_source")
-ocv_copyfiles_add_target(${the_module}_android_source_copy JAVA_SRC_COPY "Copy Java(Andoid SDK) source files" ${depends})
+ocv_copyfiles_add_target(${the_module}_android_source_copy JAVA_SRC_COPY "Copy Java(Android SDK) source files" ${depends})
 file(REMOVE "${OPENCV_DEPHELPER}/${the_module}_android_source_copy") # force rebuild after CMake run

 set(depends ${the_module}_android_source_copy "${OPENCV_DEPHELPER}/${the_module}_android_source_copy")
@@ -248,7 +248,7 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac

 /**
 * This method is provided for clients, so they can disable camera connection and stop
-* the delivery of frames even though the surface view itself is not destroyed and still stays on the scren
+* the delivery of frames even though the surface view itself is not destroyed and still stays on the screen
 */
 public void disableView() {
 synchronized(mSyncObject) {
@@ -32,4 +32,4 @@ To run performance tests, please launch a local web server in <build_dir>/bin fo

 Navigate the web browser to the kernel page you want to test, like http://localhost:8080/perf/imgproc/cvtcolor.html.

-You can input the paramater, and then click the `Run` button to run the specific case, or it will run all the cases.
+You can input the parameter, and then click the `Run` button to run the specific case, or it will run all the cases.
@@ -1683,7 +1683,7 @@ public:

 /** @brief This function returns the trained parameters arranged across rows.

-For a two class classifcation problem, it returns a row matrix. It returns learnt parameters of
+For a two class classification problem, it returns a row matrix. It returns learnt parameters of
 the Logistic Regression as a matrix of type CV_32F.
 */
 CV_WRAP virtual Mat get_learnt_thetas() const = 0;
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-"""Algorithm serializaion test."""
+"""Algorithm serialization test."""
 import tempfile
 import os
 import cv2 as cv
@@ -181,7 +181,7 @@ class cuda_test(NewOpenCVTests):
 self.assertTrue('GpuMat' in str(type(gpu_mat)), msg=type(gpu_mat))
 #TODO: print(cv.utils.dumpInputArray(gpu_mat)) # - no support for GpuMat

-# not checking output, therefore sepearate tests for different signatures is unecessary
+# not checking output, therefore sepearate tests for different signatures is unnecessary
 ret, _gpu_mat2 = reader.nextFrame(gpu_mat)
 #TODO: self.assertTrue(gpu_mat == gpu_mat2)
 self.assertTrue(ret)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-""""Core serializaion tests."""
+""""Core serialization tests."""
 import tempfile
 import os
 import cv2 as cv
@@ -215,14 +215,14 @@ finds two best matches for each feature and leaves the best one only if the
 ratio between descriptor distances is greater than the threshold match_conf.

 Unlike cv::detail::BestOf2NearestMatcher this matcher uses affine
-transformation (affine trasformation estimate will be placed in matches_info).
+transformation (affine transformation estimate will be placed in matches_info).

 @sa cv::detail::FeaturesMatcher cv::detail::BestOf2NearestMatcher
 */
 class CV_EXPORTS_W AffineBestOf2NearestMatcher : public BestOf2NearestMatcher
 {
 public:
-/** @brief Constructs a "best of 2 nearest" matcher that expects affine trasformation
+/** @brief Constructs a "best of 2 nearest" matcher that expects affine transformation
 between images

 @param full_affine whether to use full affine transformation with 6 degress of freedom or reduced
@@ -11367,7 +11367,7 @@ void UniversalTersePrint(const T& value, ::std::ostream* os) {
 // NUL-terminated string.
 template <typename T>
 void UniversalPrint(const T& value, ::std::ostream* os) {
-// A workarond for the bug in VC++ 7.1 that prevents us from instantiating
+// A workaround for the bug in VC++ 7.1 that prevents us from instantiating
 // UniversalPrinter with T directly.
 typedef T T1;
 UniversalPrinter<T1>::Print(value, os);
@@ -94,11 +94,11 @@ class Aapt(Tool):
 # get test instrumentation info
 instrumentation_tag = [t for t in tags if t.startswith("instrumentation ")]
 if not instrumentation_tag:
-raise Err("Can not find instrumentation detials in: %s", exe)
+raise Err("Can not find instrumentation details in: %s", exe)
 res.pkg_runner = re.search(r"^[ ]+A: android:name\(0x[0-9a-f]{8}\)=\"(?P<runner>.*?)\" \(Raw: \"(?P=runner)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("runner")
 res.pkg_target = re.search(r"^[ ]+A: android:targetPackage\(0x[0-9a-f]{8}\)=\"(?P<pkg>.*?)\" \(Raw: \"(?P=pkg)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("pkg")
 if not res.pkg_name or not res.pkg_runner or not res.pkg_target:
-raise Err("Can not find instrumentation detials in: %s", exe)
+raise Err("Can not find instrumentation details in: %s", exe)
 return res

@@ -452,7 +452,7 @@ int BadArgTest::run_test_case( int expected_code, const string& _descr )
 {
 thrown = true;
 if (e.code != expected_code &&
-e.code != cv::Error::StsError && e.code != cv::Error::StsAssert // Exact error codes support will be dropped. Checks should provide proper text messages intead.
+e.code != cv::Error::StsError && e.code != cv::Error::StsAssert // Exact error codes support will be dropped. Checks should provide proper text messages instead.
 )
 {
 ts->printf(TS::LOG, "%s (test case #%d): the error code %d is different from the expected %d\n",
@@ -110,7 +110,7 @@ public:
 //set parameters
 // N - the number of samples stored in memory per model
 nN = defaultNsamples;
-//kNN - k nearest neighbour - number on NN for detcting background - default K=[0.1*nN]
+//kNN - k nearest neighbour - number on NN for detecting background - default K=[0.1*nN]
 nkNN=MAX(1,cvRound(0.1*nN*3+0.40));

 //Tb - Threshold Tb*kernelwidth
@@ -292,7 +292,7 @@ protected:
 //less important parameters - things you might change but be careful
 ////////////////////////
 int nN;//totlal number of samples
-int nkNN;//number on NN for detcting background - default K=[0.1*nN]
+int nkNN;//number on NN for detecting background - default K=[0.1*nN]

 //shadow detection parameters
 bool bShadowDetection;//default 1 - do shadow detection
@@ -181,7 +181,7 @@ public:
 //! computes a background image which are the mean of all background gaussians
 virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE;

-//! re-initiaization method
+//! re-initialization method
 void initialize(Size _frameSize, int _frameType)
 {
 frameSize = _frameSize;
@@ -225,8 +225,8 @@ enum
 CV_CAP_PROP_XI_COOLING = 466, // Start camera cooling.
 CV_CAP_PROP_XI_TARGET_TEMP = 467, // Set sensor target temperature for cooling.
 CV_CAP_PROP_XI_CHIP_TEMP = 468, // Camera sensor temperature
-CV_CAP_PROP_XI_HOUS_TEMP = 469, // Camera housing tepmerature
-CV_CAP_PROP_XI_HOUS_BACK_SIDE_TEMP = 590, // Camera housing back side tepmerature
+CV_CAP_PROP_XI_HOUS_TEMP = 469, // Camera housing temperature
+CV_CAP_PROP_XI_HOUS_BACK_SIDE_TEMP = 590, // Camera housing back side temperature
 CV_CAP_PROP_XI_SENSOR_BOARD_TEMP = 596, // Camera sensor board temperature
 CV_CAP_PROP_XI_CMS = 470, // Mode of color management system.
 CV_CAP_PROP_XI_APPLY_CMS = 471, // Enable applying of CMS profiles to xiGetImage (see XI_PRM_INPUT_CMS_PROFILE, XI_PRM_OUTPUT_CMS_PROFILE).
@@ -300,7 +300,7 @@ bool CvCaptureCAM_Aravis::grabFrame()
 size_t buffer_size;
 framebuffer = (void*)arv_buffer_get_data (arv_buffer, &buffer_size);

-// retrieve image size properites
+// retrieve image size properties
 arv_buffer_get_image_region (arv_buffer, &xoffset, &yoffset, &width, &height);

 // retrieve image ID set by camera
@@ -1298,7 +1298,7 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
 colorSpace, kCGImageAlphaLast|kCGBitmapByteOrderDefault,
 provider, NULL, false, kCGRenderingIntentDefault);

-//CGImage -> CVPixelBufferRef coversion
+//CGImage -> CVPixelBufferRef conversion
 CVPixelBufferRef pixelBuffer = NULL;
 CFDataRef cfData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage));
 int status = CVPixelBufferCreateWithBytes(NULL,
@@ -814,7 +814,7 @@ bool CvCaptureFile::setupReadingAt(CMTime position) {
 if (mMode == CV_CAP_MODE_BGR || mMode == CV_CAP_MODE_RGB) {
 // For CV_CAP_MODE_BGR, read frames as BGRA (AV Foundation's YUV->RGB conversion is slightly faster than OpenCV's CV_YUV2BGR_YV12)
 // kCVPixelFormatType_32ABGR is reportedly faster on OS X, but OpenCV doesn't have a CV_ABGR2BGR conversion.
-// kCVPixelFormatType_24RGB is significanly slower than kCVPixelFormatType_32BGRA.
+// kCVPixelFormatType_24RGB is significantly slower than kCVPixelFormatType_32BGRA.
 pixelFormat = kCVPixelFormatType_32BGRA;
 mFormat = CV_8UC3;
 } else if (mMode == CV_CAP_MODE_GRAY) {
@@ -1332,7 +1332,7 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
 colorSpace, kCGImageAlphaLast|kCGBitmapByteOrderDefault,
 provider, NULL, false, kCGRenderingIntentDefault);

-//CGImage -> CVPixelBufferRef coversion
+//CGImage -> CVPixelBufferRef conversion
 CVPixelBufferRef pixelBuffer = NULL;
 CFDataRef cfData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage));
 int status = CVPixelBufferCreateWithBytes(NULL,
@@ -953,7 +953,7 @@ bool GStreamerCapture::open(const String &filename_)
 * \return property value
 *
 * There are two ways the properties can be retrieved. For seek-based properties we can query the pipeline.
-* For frame-based properties, we use the caps of the lasst receivef sample. This means that some properties
+* For frame-based properties, we use the caps of the last receivef sample. This means that some properties
 * are not available until a first frame was received
 */
 double GStreamerCapture::getProperty(int propId) const
@@ -46,7 +46,7 @@ if (APPLE_FRAMEWORK AND BUILD_SHARED_LIBS)
 set (CMAKE_INSTALL_NAME_DIR "@rpath")
 endif()

-# Hidden visibilty is required for cxx on iOS
+# Hidden visibility is required for cxx on iOS
 set (no_warn "-Wno-unused-function -Wno-overloaded-virtual")
 set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${no_warn}")
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -fvisibility=hidden -fvisibility-inlines-hidden ${no_warn}")
@@ -4,7 +4,7 @@
 # Toolchains with 'img' in the name are for MIPS R6 instruction sets.
 # It is recommended to use cmake-gui application for build scripts configuration and generation:
 # 1. Run cmake-gui
-# 2. Specifiy toolchain file for cross-compiling, mips32r5el-gnu.toolchian.cmake or mips64r6el-gnu.toolchain.cmake
+# 2. Specify toolchain file for cross-compiling, mips32r5el-gnu.toolchian.cmake or mips64r6el-gnu.toolchain.cmake
 # can be selected.
 # 3. Configure and Generate makefiles.
 # 4. make -j4 & make install
@@ -4,7 +4,7 @@
 # Toolchains with 'img' in the name are for MIPS R6 instruction sets.
 # It is recommended to use cmake-gui for build scripts configuration and generation:
 # 1. Run cmake-gui
-# 2. Specifiy toolchain file mips32r5el-gnu.toolchian.cmake for cross-compiling.
+# 2. Specify toolchain file mips32r5el-gnu.toolchian.cmake for cross-compiling.
 # 3. Configure and Generate makefiles.
 # 4. make -j4 & make install
 # ----------------------------------------------------------------------------------------------
Some files were not shown because too many files have changed in this diff.