Mirror of https://github.com/opencv/opencv.git (synced 2025-06-07 17:44:04 +08:00)
Misc. modules/ typos
Found via `codespell`
commit 5718d09e39
parent b67523550f
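
The fixes below were produced by running codespell over the modules/ tree and correcting each misspelling it reported. The exact options used are not recorded in the commit; a typical invocation (assuming codespell is installed, e.g. via pip) would be:

    pip install codespell
    codespell modules/

codespell only reports candidate misspellings; the per-file hunks that follow show each correction as a removed/added line pair.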
@@ -1788,7 +1788,7 @@ CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index );
 CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx );


-/** Link two vertices specifed by indices or pointers if they
+/** Link two vertices specified by indices or pointers if they
 are not connected or return pointer to already existing edge
 connecting the vertices.
 Functions return 1 if a new edge was created, 0 otherwise */
@@ -2648,7 +2648,7 @@ CVAPI(void) cvSetErrStatus( int status );
 #define CV_ErrModeParent 1 /* Print error and continue */
 #define CV_ErrModeSilent 2 /* Don't print and continue */

-/** Retrives current error processing mode */
+/** Retrieves current error processing mode */
 CVAPI(int) cvGetErrMode( void );

 /** Sets error processing mode, returns previously used mode */
@@ -2738,7 +2738,7 @@ static char cvFuncName[] = Name
 /**
 CV_CALL macro calls CV (or IPL) function, checks error status and
 signals a error if the function failed. Useful in "parent node"
-error procesing mode
+error processing mode
 */
 #define CV_CALL( Func ) \
 { \
@@ -56,7 +56,7 @@
 @{
 @defgroup cudacore Core part
 @{
-@defgroup cudacore_init Initalization and Information
+@defgroup cudacore_init Initialization and Information
 @defgroup cudacore_struct Data Structures
 @}
 @}
@@ -2184,7 +2184,7 @@ public:
 Mat_(int _ndims, const int* _sizes);
 //! n-dim array constructor that sets each matrix element to specified value
 Mat_(int _ndims, const int* _sizes, const _Tp& value);
-//! copy/conversion contructor. If m is of different type, it's converted
+//! copy/conversion constructor. If m is of different type, it's converted
 Mat_(const Mat& m);
 //! copy constructor
 Mat_(const Mat_& m);
@@ -2275,7 +2275,7 @@ public:
 static MatExpr eye(int rows, int cols);
 static MatExpr eye(Size size);

-//! some more overriden methods
+//! some more overridden methods
 Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright );
 Mat_ operator()( const Range& rowRange, const Range& colRange ) const;
 Mat_ operator()( const Rect& roi ) const;
@@ -2943,7 +2943,7 @@ public:

 //! the default constructor
 SparseMat_();
-//! the full constructor equivelent to SparseMat(dims, _sizes, DataType<_Tp>::type)
+//! the full constructor equivalent to SparseMat(dims, _sizes, DataType<_Tp>::type)
 SparseMat_(int dims, const int* _sizes);
 //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted
 SparseMat_(const SparseMat& m);
@@ -92,7 +92,7 @@ Except of the plain constructor which takes a list of elements, Matx can be init
 float values[] = { 1, 2, 3};
 Matx31f m(values);
 @endcode
-In case if C++11 features are avaliable, std::initializer_list can be also used to initialize Matx:
+In case if C++11 features are available, std::initializer_list can be also used to initialize Matx:
 @code{.cpp}
 Matx31f m = { 1, 2, 3};
 @endcode
@@ -245,7 +245,7 @@ public:

 /** @brief Maps OpenGL buffer to CUDA device memory.

-This operatation doesn't copy data. Several buffer objects can be mapped to CUDA memory at a time.
+This operation doesn't copy data. Several buffer objects can be mapped to CUDA memory at a time.

 A mapped data store must be unmapped with ogl::Buffer::unmapDevice before its buffer object is used.
 */
@@ -115,7 +115,7 @@ public:
 always sensible) will be used.

 @param x The initial point, that will become a centroid of an initial simplex. After the algorithm
-will terminate, it will be setted to the point where the algorithm stops, the point of possible
+will terminate, it will be set to the point where the algorithm stops, the point of possible
 minimum.
 @return The value of a function at the point found.
 */
@@ -288,7 +288,7 @@ Bland's rule <http://en.wikipedia.org/wiki/Bland%27s_rule> is used to prevent cy
 contain 32- or 64-bit floating point numbers. As a convenience, column-vector may be also submitted,
 in the latter case it is understood to correspond to \f$c^T\f$.
 @param Constr `m`-by-`n+1` matrix, whose rightmost column corresponds to \f$b\f$ in formulation above
-and the remaining to \f$A\f$. It should containt 32- or 64-bit floating point numbers.
+and the remaining to \f$A\f$. It should contain 32- or 64-bit floating point numbers.
 @param z The solution will be returned here as a column-vector - it corresponds to \f$c\f$ in the
 formulation above. It will contain 64-bit floating point numbers.
 @return One of cv::SolveLPResult
@@ -82,7 +82,7 @@ namespace cv
 Both types support the following:
 - Construction from signed and unsigned 32-bit and 64 integers,
 float/double or raw binary representation
-- Conversions betweeen each other, to float or double and to int
+- Conversions between each other, to float or double and to int
 using @ref cvRound, @ref cvTrunc, @ref cvFloor, @ref cvCeil or a bunch of
 saturate_cast functions
 - Add, subtract, multiply, divide, remainder, square root, FMA with absolute precision
@@ -555,7 +555,7 @@ VSX_IMPL_CONV_EVEN_2_4(vec_uint4, vec_double2, vec_ctu, vec_ctuo)
 // vector population count
 #define vec_popcntu vec_popcnt

-// overload and redirect wih setting second arg to zero
+// overload and redirect with setting second arg to zero
 // since we only support conversions without the second arg
 #define VSX_IMPL_OVERLOAD_Z2(rt, rg, fnm) \
 VSX_FINLINE(rt) fnm(const rg& a) { return fnm(a, 0); }
@@ -610,7 +610,7 @@ VSX_IMPL_CONV_ODD_2_4(vec_uint4, vec_double2, vec_ctuo, vec_ctu)

 #endif // XLC VSX compatibility

-// ignore GCC warning that casued by -Wunused-but-set-variable in rare cases
+// ignore GCC warning that caused by -Wunused-but-set-variable in rare cases
 #if defined(__GNUG__) && !defined(__clang__)
 # define VSX_UNUSED(Tvec) Tvec __attribute__((__unused__))
 #else // CLANG, XLC
@@ -736,7 +736,7 @@ VSX_IMPL_LOAD_L8(vec_double2, double)
 # define vec_cmpne(a, b) vec_not(vec_cmpeq(a, b))
 #endif

-// absoulte difference
+// absolute difference
 #ifndef vec_absd
 # define vec_absd(a, b) vec_sub(vec_max(a, b), vec_min(a, b))
 #endif
@@ -289,7 +289,7 @@ protected:
 };

 /** Image class which owns the data, so it can be allocated and is always
-freed. It cannot be copied but can be explicity cloned.
+freed. It cannot be copied but can be explicitly cloned.
 */
 template<typename T>
 class WImageBuffer : public WImage<T>
@@ -1914,7 +1914,7 @@ cvPtrND( const CvArr* arr, const int* idx, int* _type,
 }


-// Returns specifed element of n-D array given linear index
+// Returns specified element of n-D array given linear index
 CV_IMPL CvScalar
 cvGet1D( const CvArr* arr, int idx )
 {
@@ -1949,7 +1949,7 @@ cvGet1D( const CvArr* arr, int idx )
 }


-// Returns specifed element of 2D array
+// Returns specified element of 2D array
 CV_IMPL CvScalar
 cvGet2D( const CvArr* arr, int y, int x )
 {
@@ -1983,7 +1983,7 @@ cvGet2D( const CvArr* arr, int y, int x )
 }


-// Returns specifed element of 3D array
+// Returns specified element of 3D array
 CV_IMPL CvScalar
 cvGet3D( const CvArr* arr, int z, int y, int x )
 {
@@ -2005,7 +2005,7 @@ cvGet3D( const CvArr* arr, int z, int y, int x )
 }


-// Returns specifed element of nD array
+// Returns specified element of nD array
 CV_IMPL CvScalar
 cvGetND( const CvArr* arr, const int* idx )
 {
@@ -2025,7 +2025,7 @@ cvGetND( const CvArr* arr, const int* idx )
 }


-// Returns specifed element of n-D array given linear index
+// Returns specified element of n-D array given linear index
 CV_IMPL double
 cvGetReal1D( const CvArr* arr, int idx )
 {
@@ -2064,7 +2064,7 @@ cvGetReal1D( const CvArr* arr, int idx )
 }


-// Returns specifed element of 2D array
+// Returns specified element of 2D array
 CV_IMPL double
 cvGetReal2D( const CvArr* arr, int y, int x )
 {
@@ -2103,7 +2103,7 @@ cvGetReal2D( const CvArr* arr, int y, int x )
 }


-// Returns specifed element of 3D array
+// Returns specified element of 3D array
 CV_IMPL double
 cvGetReal3D( const CvArr* arr, int z, int y, int x )
 {
@@ -2131,7 +2131,7 @@ cvGetReal3D( const CvArr* arr, int z, int y, int x )
 }


-// Returns specifed element of nD array
+// Returns specified element of nD array
 CV_IMPL double
 cvGetRealND( const CvArr* arr, const int* idx )
 {
@@ -2156,7 +2156,7 @@ cvGetRealND( const CvArr* arr, const int* idx )
 }


-// Assigns new value to specifed element of nD array given linear index
+// Assigns new value to specified element of nD array given linear index
 CV_IMPL void
 cvSet1D( CvArr* arr, int idx, CvScalar scalar )
 {
@@ -2187,7 +2187,7 @@ cvSet1D( CvArr* arr, int idx, CvScalar scalar )
 }


-// Assigns new value to specifed element of 2D array
+// Assigns new value to specified element of 2D array
 CV_IMPL void
 cvSet2D( CvArr* arr, int y, int x, CvScalar scalar )
 {
@@ -2216,7 +2216,7 @@ cvSet2D( CvArr* arr, int y, int x, CvScalar scalar )
 }


-// Assigns new value to specifed element of 3D array
+// Assigns new value to specified element of 3D array
 CV_IMPL void
 cvSet3D( CvArr* arr, int z, int y, int x, CvScalar scalar )
 {
@@ -2234,7 +2234,7 @@ cvSet3D( CvArr* arr, int z, int y, int x, CvScalar scalar )
 }


-// Assigns new value to specifed element of nD array
+// Assigns new value to specified element of nD array
 CV_IMPL void
 cvSetND( CvArr* arr, const int* idx, CvScalar scalar )
 {
@@ -150,7 +150,7 @@ namespace cv
 d*=-1.0;
 d.copyTo(r);

-//here everything goes. check that everything is setted properly
+//here everything goes. check that everything is set properly
 dprintf(("proxy_x\n"));print_matrix(proxy_x);
 dprintf(("d first time\n"));print_matrix(d);
 dprintf(("r\n"));print_matrix(r);
@@ -932,7 +932,7 @@ namespace
 {
 // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
 typedef struct {
-int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version
+int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
 int Cores;
 } SMtoCores;
@@ -129,7 +129,7 @@ system("pause");
 return 0;
 }

-****Suggesttion for imporving Simplex implentation***************************************************************************************
+****Suggestion for improving Simplex implementation***************************************************************************************

 Currently the downhilll simplex method outputs the function value that is minimized. It should also return the coordinate points where the
 function is minimized. This is very useful in many applications such as using back projection methods to find a point of intersection of
@@ -1630,7 +1630,7 @@ Context& initializeContextFromGL()

 for (int i = 0; i < (int)numPlatforms; i++)
 {
-// query platform extension: presence of "cl_khr_gl_sharing" extension is requred
+// query platform extension: presence of "cl_khr_gl_sharing" extension is required
 {
 AutoBuffer<char> extensionStr;
@@ -730,7 +730,7 @@ void ThreadPool::setNumOfThreads(unsigned n)
 {
 num_threads = n;
 if (n == 1)
-if (job == NULL) reconfigure(0); // stop worker threads immediatelly
+if (job == NULL) reconfigure(0); // stop worker threads immediately
 }
 }
@@ -470,7 +470,7 @@ public:

 /*
 * a convertor must provide :
-* - `operator >> (uchar * & dst)` for writting current binary data to `dst` and moving to next data.
+* - `operator >> (uchar * & dst)` for writing current binary data to `dst` and moving to next data.
 * - `operator bool` for checking if current loaction is valid and not the end.
 */
 template<typename _to_binary_convertor_t> inline
@@ -493,7 +493,7 @@ public:

 bool flush()
 {
-/* controll line width, so on. */
+/* control line width, so on. */
 size_t len = base64_encode(src_beg, base64_buffer.data(), 0U, src_cur - src_beg);
 if (len == 0U)
 return false;
@@ -259,7 +259,7 @@ cvOpenFileStorage( const char* query, CvMemStorage* dststorage, int flags, const
 xml_buf_size = MIN(xml_buf_size, int(file_size));
 fseek( fs->file, -xml_buf_size, SEEK_END );
 char* xml_buf = (char*)cvAlloc( xml_buf_size+2 );
-// find the last occurence of </opencv_storage>
+// find the last occurrence of </opencv_storage>
 for(;;)
 {
 int line_offset = (int)ftell( fs->file );
@@ -1230,7 +1230,7 @@ static void* icvReadGraph( CvFileStorage* fs, CvFileNode* node )
 vtx_buf[vtx1], vtx_buf[vtx2], 0, &edge );

 if( result == 0 )
-CV_Error( CV_StsBadArg, "Duplicated edge has occured" );
+CV_Error( CV_StsBadArg, "Duplicated edge has occurred" );

 edge->weight = *(float*)(dst_ptr + sizeof(int)*2);
 if( elem_size > (int)sizeof(CvGraphEdge) )
@@ -481,7 +481,7 @@ struct HWFeatures
 have[CV_CPU_NEON] = (features & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
 have[CV_CPU_FP16] = (features & ANDROID_CPU_ARM_FEATURE_VFP_FP16) != 0;
 #else
-__android_log_print(ANDROID_LOG_INFO, "OpenCV", "cpufeatures library is not avaialble for CPU detection");
+__android_log_print(ANDROID_LOG_INFO, "OpenCV", "cpufeatures library is not available for CPU detection");
 #if CV_NEON
 __android_log_print(ANDROID_LOG_INFO, "OpenCV", "- NEON instructions is enabled via build flags");
 have[CV_CPU_NEON] = true;
@@ -112,7 +112,7 @@ OCL_TEST_P(Dft, Mat)
 OCL_OFF(cv::dft(src, dst, dft_flags, nonzero_rows));
 OCL_ON(cv::dft(usrc, udst, dft_flags, nonzero_rows));

-// In case forward R2C 1d tranform dst contains only half of output
+// In case forward R2C 1d transform dst contains only half of output
 // without complex conjugate
 if (dft_type == R2C && is1d && (dft_flags & cv::DFT_INVERSE) == 0)
 {
@@ -51,7 +51,7 @@ static void mytest(cv::Ptr<cv::DownhillSolver> solver,cv::Ptr<cv::MinProblemSolv
 solver->getInitStep(settedStep);
 ASSERT_TRUE(settedStep.rows==1 && settedStep.cols==ndim);
 ASSERT_TRUE(std::equal(step.begin<double>(),step.end<double>(),settedStep.begin<double>()));
-std::cout<<"step setted:\n\t"<<step<<std::endl;
+std::cout<<"step set:\n\t"<<step<<std::endl;
 double res=solver->minimize(x);
 std::cout<<"res:\n\t"<<res<<std::endl;
 std::cout<<"x:\n\t"<<x<<std::endl;
@@ -466,7 +466,7 @@ bool CV_OperationsTest::TestSubMatAccess()
 Vec3f ydir(1.f, 0.f, 1.f);
 Vec3f fpt(0.1f, 0.7f, 0.2f);
 T_bs.setTo(0);
-T_bs(Range(0,3),Range(2,3)) = 1.0*Mat(cdir); // wierd OpenCV stuff, need to do multiply
+T_bs(Range(0,3),Range(2,3)) = 1.0*Mat(cdir); // weird OpenCV stuff, need to do multiply
 T_bs(Range(0,3),Range(1,2)) = 1.0*Mat(ydir);
 T_bs(Range(0,3),Range(0,1)) = 1.0*Mat(cdir.cross(ydir));
 T_bs(Range(0,3),Range(3,4)) = 1.0*Mat(fpt);
@@ -1192,7 +1192,7 @@ OCL_TEST(UMat, DISABLED_OCL_ThreadSafe_CleanupCallback_1_VeryLongTest)
 }
 }

-// Case 2: concurent deallocation of UMatData between UMat and Mat deallocators. Hard to catch!
+// Case 2: concurrent deallocation of UMatData between UMat and Mat deallocators. Hard to catch!
 OCL_TEST(UMat, DISABLED_OCL_ThreadSafe_CleanupCallback_2_VeryLongTest)
 {
 if (!cv::ocl::useOpenCL())
@@ -41,7 +41,7 @@ endif()

 add_definitions(-DHAVE_PROTOBUF=1)

-#supress warnings in autogenerated caffe.pb.* files
+#suppress warnings in autogenerated caffe.pb.* files
 ocv_warnings_disable(CMAKE_CXX_FLAGS
 -Wunused-parameter -Wundef -Wignored-qualifiers -Wno-enum-compare
 -Wdeprecated-declarations
@@ -53,7 +53,7 @@
 - API for new layers creation, layers are building bricks of neural networks;
 - set of built-in most-useful Layers;
 - API to constuct and modify comprehensive neural networks from layers;
-- functionality for loading serialized networks models from differnet frameworks.
+- functionality for loading serialized networks models from different frameworks.

 Functionality of this module is designed only for forward pass computations (i. e. network testing).
 A network training is in principle not supported.
@@ -51,13 +51,13 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN

 /** @defgroup dnnLayerList Partial List of Implemented Layers
 @{
-This subsection of dnn module contains information about bult-in layers and their descriptions.
+This subsection of dnn module contains information about built-in layers and their descriptions.

-Classes listed here, in fact, provides C++ API for creating intances of bult-in layers.
+Classes listed here, in fact, provides C++ API for creating instances of built-in layers.
 In addition to this way of layers instantiation, there is a more common factory API (see @ref dnnLayerFactory), it allows to create layers dynamically (by name) and register new ones.
-You can use both API, but factory API is less convinient for native C++ programming and basically designed for use inside importers (see @ref readNetFromCaffe(), @ref readNetFromTorch(), @ref readNetFromTensorflow()).
+You can use both API, but factory API is less convenient for native C++ programming and basically designed for use inside importers (see @ref readNetFromCaffe(), @ref readNetFromTorch(), @ref readNetFromTensorflow()).

-Bult-in layers partially reproduce functionality of corresponding Caffe and Torch7 layers.
+Built-in layers partially reproduce functionality of corresponding Caffe and Torch7 layers.
 In partuclar, the following layers and Caffe importer were tested to reproduce <a href="http://caffe.berkeleyvision.org/tutorial/layers.html">Caffe</a> functionality:
 - Convolution
 - Deconvolution
@@ -125,12 +125,12 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
 virtual void setOutShape(const MatShape &outTailShape = MatShape()) = 0;

 /** @deprecated Use flag `produce_cell_output` in LayerParams.
-* @brief Specifies either interpet first dimension of input blob as timestamp dimenion either as sample.
+* @brief Specifies either interpret first dimension of input blob as timestamp dimenion either as sample.
 *
-* If flag is set to true then shape of input blob will be interpeted as [`T`, `N`, `[data dims]`] where `T` specifies number of timpestamps, `N` is number of independent streams.
+* If flag is set to true then shape of input blob will be interpreted as [`T`, `N`, `[data dims]`] where `T` specifies number of timestamps, `N` is number of independent streams.
 * In this case each forward() call will iterate through `T` timestamps and update layer's state `T` times.
 *
-* If flag is set to false then shape of input blob will be interpeted as [`N`, `[data dims]`].
+* If flag is set to false then shape of input blob will be interpreted as [`N`, `[data dims]`].
 * In this case each forward() call will make one iteration and produce one timestamp with shape [`N`, `[out dims]`].
 */
 CV_DEPRECATED virtual void setUseTimstampsDim(bool use = true) = 0;
@@ -146,7 +146,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
 * @param output contains computed outputs: @f$h_t@f$ (and @f$c_t@f$ if setProduceCellOutput() flag was set to true).
 *
 * If setUseTimstampsDim() is set to true then @p input[0] should has at least two dimensions with the following shape: [`T`, `N`, `[data dims]`],
-* where `T` specifies number of timpestamps, `N` is number of independent streams (i.e. @f$ x_{t_0 + t}^{stream} @f$ is stored inside @p input[0][t, stream, ...]).
+* where `T` specifies number of timestamps, `N` is number of independent streams (i.e. @f$ x_{t_0 + t}^{stream} @f$ is stored inside @p input[0][t, stream, ...]).
 *
 * If setUseTimstampsDim() is set to fase then @p input[0] should contain single timestamp, its shape should has form [`N`, `[data dims]`] with at least one dimension.
 * (i.e. @f$ x_{t}^{stream} @f$ is stored inside @p input[0][stream, ...]).
@@ -328,7 +328,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
 * @param begin Vector of start indices
 * @param size Vector of sizes
 *
-* More convinient numpy-like slice. One and only output blob
+* More convenient numpy-like slice. One and only output blob
 * is a slice `input[begin[0]:begin[0]+size[0], begin[1]:begin[1]+size[1], ...]`
 *
 * 3. Torch mode
@@ -691,7 +691,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
 * @param swapRB flag which indicates that swap first and last channels
 * in 3-channel image is necessary.
 * @param crop flag which indicates whether image will be cropped after resize or not
-* @details if @p crop is true, input image is resized so one side after resize is equal to corresponing
+* @details if @p crop is true, input image is resized so one side after resize is equal to corresponding
 * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
 * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
 * @returns 4-dimansional Mat with NCHW dimensions order.
@@ -719,7 +719,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
 * @param swapRB flag which indicates that swap first and last channels
 * in 3-channel image is necessary.
 * @param crop flag which indicates whether image will be cropped after resize or not
-* @details if @p crop is true, input image is resized so one side after resize is equal to corresponing
+* @details if @p crop is true, input image is resized so one side after resize is equal to corresponding
 * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
 * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
 * @returns 4-dimansional Mat with NCHW dimensions order.
@@ -131,7 +131,7 @@ message PriorBoxParameter {
 // Variance for adjusting the prior bboxes.
 repeated float variance = 6;
 // By default, we calculate img_height, img_width, step_x, step_y based on
-// bottom[0] (feat) and bottom[1] (img). Unless these values are explicitely
+// bottom[0] (feat) and bottom[1] (img). Unless these values are explicitly
 // provided.
 // Explicitly provide the img_size.
 optional uint32 img_size = 7;
@@ -58,7 +58,7 @@ namespace cv {
 namespace dnn {
 CV__DNN_EXPERIMENTAL_NS_BEGIN

-// this option is usefull to run valgrind memory errors detection
+// this option is useful to run valgrind memory errors detection
 static bool DNN_DISABLE_MEMORY_OPTIMIZATIONS = utils::getConfigurationParameterBool("OPENCV_DNN_DISABLE_MEMORY_OPTIMIZATIONS", false);

 using std::vector;
@@ -911,7 +911,7 @@ struct Net::Impl
 int id = getLayerId(layerName);

 if (id < 0)
-CV_Error(Error::StsError, "Requsted layer \"" + layerName + "\" not found");
+CV_Error(Error::StsError, "Requested layer \"" + layerName + "\" not found");

 return getLayerData(id);
 }
@@ -1897,7 +1897,7 @@ struct Net::Impl
 if ((size_t)pin.oid >= ld.outputBlobs.size())
 {
 CV_Error(Error::StsOutOfRange, format("Layer \"%s\" produce only %d outputs, "
-"the #%d was requsted", ld.name.c_str(),
+"the #%d was requested", ld.name.c_str(),
 ld.outputBlobs.size(), pin.oid));
 }
 if (preferableTarget != DNN_TARGET_CPU)
@@ -88,7 +88,7 @@ public:
 for (int curAxis = 0; curAxis < outputs[0].size(); curAxis++)
 {
 if (curAxis != cAxis && outputs[0][curAxis] != curShape[curAxis])
-CV_Error(Error::StsBadSize, "Inconsitent shape for ConcatLayer");
+CV_Error(Error::StsBadSize, "Inconsistent shape for ConcatLayer");
 }
 }
@@ -185,7 +185,7 @@ static bool ocl4dnnFastImageGEMM(const CBLAS_TRANSPOSE TransA,
 int blockC_height = blocksize;

 int use_buffer_indicator = 8;
-// To fix the edge problem casued by the sub group block read.
+// To fix the edge problem caused by the sub group block read.
 // we have to pad the image if it's not multiple of tile.
 // just padding one line is enough as the sub group block read
 // will clamp to edge according to the spec.
@@ -188,7 +188,7 @@ __kernel void ConvolveBasic(
 #define VLOAD4(_v, _p) do { _v = vload4(0, _p); } while(0)

 // Each work-item computes a OUT_BLOCK_WIDTH * OUT_BLOCK_HEIGHT region of one output map.
-// Each work-group (which will be mapped to 1 SIMD16/SIMD8 EU thread) will compute 16/8 different feature maps, but each feature map is for the same region of the imput image.
+// Each work-group (which will be mapped to 1 SIMD16/SIMD8 EU thread) will compute 16/8 different feature maps, but each feature map is for the same region of the input image.
 // NDRange: (output_width+pad)/ OUT_BLOCK_WIDTH, (output_height+pad)/OUT_BLOCK_HEIGHT, NUM_FILTERS/OUT_BLOCK_DEPTH

 // NOTE: for beignet this reqd_work_group_size does not guarantee that SIMD16 mode will be used, the compiler could choose to use two SIMD8 threads, and if that happens the code will break.
@@ -220,7 +220,7 @@ convolve_simd(

 int in_addr;

-// find weights adress of given neuron (lid is index)
+// find weights address of given neuron (lid is index)
 unsigned int weight_addr = (fmg % (ALIGNED_NUM_FILTERS/SIMD_SIZE)) * INPUT_DEPTH * KERNEL_WIDTH * KERNEL_HEIGHT * SIMD_SIZE + lid;

 for(int i=0;i<OUT_BLOCK_SIZE;i++) {
@@ -1096,9 +1096,9 @@ void TFImporter::populateNet(Net dstNet)
 dstNet.setInputsNames(netInputs);
 }
 else if (type == "Split") {
-// TODO: determing axis index remapping by input dimensions order of input blob
+// TODO: determining axis index remapping by input dimensions order of input blob
 // TODO: slicing input may be Const op
-// TODO: slicing kernels for convolutions - in current implenmentation it is impossible
+// TODO: slicing kernels for convolutions - in current implementation it is impossible
 // TODO: add parsing num of slices parameter
 CV_Assert(layer.input_size() == 2);
 // num_split
@@ -8,11 +8,11 @@ try:
 import cv2 as cv
 except ImportError:
 raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
-'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
 try:
 import torch
 except ImportError:
-raise ImportError('Can\'t find pytorch. Please intall it by following instructions on the official site')
+raise ImportError('Can\'t find pytorch. Please install it by following instructions on the official site')

 from torch.utils.serialization import load_lua
 from pascal_semsegm_test_fcn import eval_segm_result, get_conf_mat, get_metrics, DatasetImageFetch, SemSegmEvaluation
@@ -9,12 +9,12 @@ try:
 import caffe
 except ImportError:
 raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
-'configure environemnt variable PYTHONPATH to "git/caffe/python" directory')
+'configure environment variable PYTHONPATH to "git/caffe/python" directory')
 try:
 import cv2 as cv
 except ImportError:
 raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
-'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')


 class DataFetch(object):
@@ -7,12 +7,12 @@ try:
 import caffe
 except ImportError:
 raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
-'configure environemnt variable PYTHONPATH to "git/caffe/python" directory')
+'configure environment variable PYTHONPATH to "git/caffe/python" directory')
 try:
 import cv2 as cv
 except ImportError:
 raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
-'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')

 if __name__ == "__main__":
 parser = argparse.ArgumentParser()
@@ -9,10 +9,10 @@ try:
 import cv2 as cv
 except ImportError:
 raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
-'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')

 # If you've got an exception "Cannot load libmkl_avx.so or libmkl_def.so" or similar, try to export next variable
-# before runnigng the script:
+# before running the script:
 # LD_PRELOAD=/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so
@@ -9,7 +9,7 @@ try:
 import cv2 as cv
 except ImportError:
 raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
-'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')


 def get_metrics(conf_mat):
@@ -73,7 +73,7 @@ namespace cv
 CV_EXPORTS Affine3d makeTransformToGlobal(const Vec3d& axis_x, const Vec3d& axis_y, const Vec3d& axis_z, const Vec3d& origin = Vec3d::all(0));

 /** @brief Constructs camera pose from position, focal_point and up_vector (see gluLookAt() for more
-infromation).
+information).

 @param position Position of the camera in global coordinate frame.
 @param focal_point Focal point of the camera in global coordinate frame.
@@ -151,7 +151,7 @@ namespace cv { namespace viz {
 {
 [self breakEventLoop];

-// The NSWindow is closing, so prevent anyone from accidently using it
+// The NSWindow is closing, so prevent anyone from accidentally using it
 renWin->SetRootWindow(NULL);
 }
 }