Merge pull request #21772 from luzpaz:typo/dnn

Alexander Alekhin 2022-03-24 16:32:46 +00:00
commit 78bc11465b
13 changed files with 20 additions and 20 deletions


@@ -389,7 +389,7 @@ CV__DNN_INLINE_NS_BEGIN
/**
* @brief "Deattaches" all the layers, attached to particular layer.
* @brief "Detaches" all the layers, attached to particular layer.
*/
virtual void unsetAttached();
@@ -1579,7 +1579,7 @@ public:
* - top-right
* - bottom-right
*
- * Use cv::getPerspectiveTransform function to retrive image region without perspective transformations.
+ * Use cv::getPerspectiveTransform function to retrieve image region without perspective transformations.
*
* @note If DL model doesn't support that kind of output then result may be derived from detectTextRectangles() output.
*
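For context, the usage this comment points at can be sketched as follows: a minimal, hypothetical helper (the name cropQuad and the parameters quad and outSize are invented here) that rectifies one detected quadrangle with cv::getPerspectiveTransform and cv::warpPerspective, assuming the four corners arrive in the order listed above.

```
#include <opencv2/imgproc.hpp>
#include <vector>

// Hypothetical helper: rectify one detected text quadrangle.
// `quad` must hold exactly four corners in the documented order:
// bottom-left, top-left, top-right, bottom-right.
static cv::Mat cropQuad(const cv::Mat& frame, const std::vector<cv::Point2f>& quad, cv::Size outSize)
{
    const float w = static_cast<float>(outSize.width)  - 1.f;
    const float h = static_cast<float>(outSize.height) - 1.f;
    std::vector<cv::Point2f> dst = {
        {0.f, h},   // bottom-left
        {0.f, 0.f}, // top-left
        {w,   0.f}, // top-right
        {w,   h}    // bottom-right
    };
    cv::Mat M = cv::getPerspectiveTransform(quad, dst);
    cv::Mat crop;
    cv::warpPerspective(frame, crop, M, outSize);
    return crop;
}
```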


@@ -100,7 +100,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
CV_Assert(output.rank() == input.rank());
CV_Assert(output_axis_offset < output.get_axis_size(axis));
- /* if axes preceeding the concat axis are all singleton, the concat blocks are contiguous
+ /* if axes preceding the concat axis are all singleton, the concat blocks are contiguous
* in the output and we can copy each block directly
*/
if (output.size_range(0, axis) == 1)
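To make the contiguity argument above concrete, here is a minimal host-side sketch with hypothetical shapes (not the cuda4dnn kernel itself): concatenating {1, 2, 3} and {1, 4, 3} along axis 1, where every axis before the concat axis is singleton, reduces to one bulk copy per input at a running offset.

```
#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    // inputs of shape {1, 2, 3} and {1, 4, 3}, output of shape {1, 6, 3}
    std::vector<float> a(2 * 3, 1.f), b(4 * 3, 2.f), out(6 * 3, 0.f);

    const std::size_t trailing = 3;  // product of axis sizes after the concat axis
    std::size_t axis_offset = 0;     // current output offset along the concat axis

    // each input is a single contiguous block in the output
    std::copy(a.begin(), a.end(), out.begin() + axis_offset * trailing);
    axis_offset += 2;                // a's extent along the concat axis
    std::copy(b.begin(), b.end(), out.begin() + axis_offset * trailing);

    assert(out[5] == 1.f && out[6] == 2.f && out[17] == 2.f);
    return 0;
}
```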


@@ -33,7 +33,7 @@
* template <class T, std::size_t Rank>
* void launch_some_kernel(...);
*
- * // creates the dispatcher named "some_dispatcher" which invokves the correct instantiation of "launch_some_kernel"
+ * // creates the dispatcher named "some_dispatcher" which invokes the correct instantiation of "launch_some_kernel"
* GENERATE_KERNEL_DISPATCHER(some_dispatcher, launch_some_kernel);
*
* // internal API function
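As a rough illustration of what such a dispatcher amounts to (a hand-written sketch, not the actual GENERATE_KERNEL_DISPATCHER expansion), a runtime rank can be mapped onto the matching compile-time instantiation like this; the supported ranks and the launcher body are assumptions.

```
#include <cstddef>
#include <iostream>
#include <stdexcept>

// stand-in for the templated kernel launcher named in the comment above
template <class T, std::size_t Rank>
void launch_some_kernel(const T* /*data*/, std::size_t /*n*/)
{
    std::cout << "launching the Rank=" << Rank << " instantiation\n";
}

// hand-rolled dispatcher: pick the instantiation that matches the runtime rank
template <class T>
void some_dispatcher(std::size_t rank, const T* data, std::size_t n)
{
    switch (rank) {
    case 1: launch_some_kernel<T, 1>(data, n); break;
    case 2: launch_some_kernel<T, 2>(data, n); break;
    case 3: launch_some_kernel<T, 3>(data, n); break;
    case 4: launch_some_kernel<T, 4>(data, n); break;
    default: throw std::runtime_error("unsupported rank");
    }
}

int main()
{
    float data[8] = {};
    some_dispatcher(3, data, 8);  // invokes launch_some_kernel<float, 3>
    return 0;
}
```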


@@ -72,7 +72,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
__syncthreads();
/* We interchange `threadIdx.x` and `threadIdx.y` so that consecutive output indices map to
- * consecutive threads. This would allow writes across threds in a warp to be coalesced.
+ * consecutive threads. This would allow writes across threads in a warp to be coalesced.
*/
const index_type out_x = blockIdx.y * TILE_SIZE + threadIdx.x;
const index_type out_y_begin = blockIdx.x * TILE_SIZE + threadIdx.y;
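A quick host-side illustration of the point made in this comment (example values only, not the actual kernel): after the swap, threadIdx.x selects the output column, so the flat output offsets written by consecutive threads differ by exactly one, which is what lets the hardware coalesce them.

```
#include <cstdio>

int main()
{
    // arbitrary example values standing in for the CUDA built-ins
    const int TILE_SIZE = 4, out_width = 32;
    const int blockIdx_x = 1, blockIdx_y = 2, threadIdx_y = 0;

    for (int threadIdx_x = 0; threadIdx_x < TILE_SIZE; ++threadIdx_x)
    {
        const int out_x = blockIdx_y * TILE_SIZE + threadIdx_x;  // as in the kernel above
        const int out_y = blockIdx_x * TILE_SIZE + threadIdx_y;
        std::printf("threadIdx.x = %d writes flat offset %d\n",
                    threadIdx_x, out_y * out_width + out_x);     // 136, 137, 138, 139
    }
    return 0;
}
```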
@@ -156,7 +156,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
* tensor indices be [o1, o2, ...]. The permutation operation essentially copies items
* from the input tensor to new locations in the output tensor as dictated by the indices.
*
- * If the size of the nth axis (say i2) of the input is one the input and output indicies for
+ * If the size of the nth axis (say i2) of the input is one the input and output indices for
* all the elements will be of the form be [i1, 0, ...] and [..., 0, ...] respectively.
* The index does not contribute to the element's address calculation and hence would give
* identical result if it weren't there.
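The claim that a size-1 axis contributes nothing to the address can be checked directly; a small sketch with assumed shapes {2, 1, 3} versus the squeezed {2, 3}:

```
#include <cassert>
#include <cstddef>

int main()
{
    // flat index of [i1, 0, i3] in shape {2, 1, 3} (strides {3, 3, 1})
    // versus [i1, i3] in the squeezed shape {2, 3} (strides {3, 1})
    for (std::size_t i1 = 0; i1 < 2; ++i1)
        for (std::size_t i3 = 0; i3 < 3; ++i3)
        {
            const std::size_t with_singleton = i1 * 3 + 0 * 3 + i3;
            const std::size_t squeezed       = i1 * 3 + i3;
            assert(with_singleton == squeezed);  // the singleton axis never matters
        }
    return 0;
}
```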


@@ -159,7 +159,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
/* We can do a copy if the reduced rank is two and only the first axis is sliced.
* The general requirement is that only one axis is sliced and all the axes that
- * preceed the sliced axis are singleton. However, the reductions above will remove
+ * precede the sliced axis are singleton. However, the reductions above will remove
* all the leading singleton axes and merge the trailing unsliced axes into one, or
* zero if there are no trailing unsliced axes. The latter is handled separately.
*/
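A worked example of the single-copy case described above, with hypothetical sizes: slicing rows [2, 5) of axis 2 in a {1, 1, 6, 4} tensor selects one contiguous run of the flat buffer, so the whole slice is a single copy.

```
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> input(6 * 4);               // shape {1, 1, 6, 4}, leading axes singleton
    for (int i = 0; i < 24; ++i) input[i] = i;

    const int trailing = 4;                      // product of axis sizes after the sliced axis
    const int first = 2 * trailing, last = 5 * trailing;

    std::vector<int> output(input.begin() + first, input.begin() + last);
    assert(output.size() == 12 && output.front() == 8 && output.back() == 19);
    return 0;
}
```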


@@ -68,7 +68,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
}
}
});
- /* std::shared_ptr<T>::reset invokves the deleter if an exception occurs; hence, we don't
+ /* std::shared_ptr<T>::reset invokes the deleter if an exception occurs; hence, we don't
* need to have a try-catch block to free the allocated device memory
*/
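The guarantee this comment relies on is that std::shared_ptr::reset(p, d) calls d(p) itself if it fails (for instance, if allocating the control block throws), so the raw resource cannot leak even without a try/catch. A minimal sketch of the pattern, with malloc/free standing in for the device allocator:

```
#include <cstdlib>
#include <memory>

int main()
{
    void* raw = std::malloc(1024);                    // acquire the raw resource first
    std::shared_ptr<void> owner;
    owner.reset(raw, [](void* p) { std::free(p); });  // if reset throws, free(p) still runs
    return 0;                                         // otherwise the deleter runs at destruction
}
```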


@@ -147,7 +147,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
/* host const void pointer to const void device pointer */
CUDA4DNN_HOST_DEVICE explicit DevicePtr(pointer ptr_) noexcept : ptr{ ptr_ } { }
- /* allow any device pointer to be implicitly convereted to void device pointer */
+ /* allow any device pointer to be implicitly converted to void device pointer */
template <class T>
CUDA4DNN_HOST_DEVICE DevicePtr(DevicePtr<T> ptr_) noexcept : ptr{ ptr_.get() } { }
@@ -199,7 +199,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
/* host pointer to device pointer */
CUDA4DNN_HOST_DEVICE explicit DevicePtr(pointer ptr_) noexcept : ptr{ ptr_ } { }
- /* allow any device pointer to mutable memory to be implicitly convereted to void device pointer */
+ /* allow any device pointer to mutable memory to be implicitly converted to void device pointer */
template <class T, typename std::enable_if<!std::is_const<T>::value, bool>::type = false>
CUDA4DNN_HOST_DEVICE DevicePtr(DevicePtr<T> ptr_) noexcept : ptr { ptr_.get() } { }
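A stripped-down illustration of the std::enable_if constraint used above (a toy Ptr type, not the real DevicePtr): pointers to mutable memory convert implicitly to the void pointer, while pointers to const memory are rejected at compile time, so constness cannot be dropped silently.

```
#include <type_traits>

template <class T>
struct Ptr
{
    T* raw = nullptr;
    T* get() const { return raw; }
};

template <>
struct Ptr<void>
{
    void* raw = nullptr;

    Ptr() = default;

    /* implicit conversion, but only from pointers to non-const memory */
    template <class T, typename std::enable_if<!std::is_const<T>::value, bool>::type = false>
    Ptr(Ptr<T> other) : raw(other.get()) { }
};

int main()
{
    Ptr<int> p;
    Ptr<void> v = p;        // fine: int is non-const
    // Ptr<const int> cp;
    // Ptr<void> bad = cp;  // would not compile: the constraint rejects const int
    (void)v;
    return 0;
}
```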


@@ -791,7 +791,7 @@ namespace cv {
if (layers_vec.size() > 1)
{
// layer ids in layers_vec - inputs of Slice layers
- // after adding offset to layers_vec: layer ids - ouputs of Slice layers
+ // after adding offset to layers_vec: layer ids - outputs of Slice layers
for (size_t k = 0; k < layers_vec.size(); ++k)
layers_vec[k] += layers_vec.size();


@@ -799,7 +799,7 @@ struct TextRecognitionModel_Impl : public Model::Impl
virtual
std::string ctcPrefixBeamSearchDecode(const Mat& prediction) {
- // CTC prefix beam seach decode.
+ // CTC prefix beam search decode.
// For more detail, refer to:
// https://distill.pub/2017/ctc/#inference
// https://gist.github.com/awni/56369a90d03953e370f3964c826ed4b0i
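For intuition only, here is a much simpler greedy (best-path) CTC decode in C++; it is not the prefix beam search implemented by the method above (see the linked distill.pub article for that), and the argument layout is an assumption.

```
#include <algorithm>
#include <string>
#include <vector>

// probs[t][c]: per-timestep class scores; vocabulary maps class index -> character.
std::string ctcGreedyDecode(const std::vector<std::vector<float>>& probs,
                            const std::string& vocabulary,
                            int blankIndex)
{
    std::string result;
    int prev = blankIndex;
    for (const auto& row : probs)
    {
        const int best = static_cast<int>(std::max_element(row.begin(), row.end()) - row.begin());
        if (best != blankIndex && best != prev)   // drop blanks, collapse repeats
            result += vocabulary[best];
        prev = best;
    }
    return result;
}
```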


@@ -331,7 +331,7 @@ teng_graph_t tengine_init(const char* layer_name, float* input_, int inch, int g
teg_weight = kernel_;
}
- /* initial the resoruce of tengine */
+ /* initial the resource of tengine */
if(false == tengine_init_flag)
{
init_tengine();


@@ -290,7 +290,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
if (cvtest::debugLevel > 0)
{
const std::vector<size_t>& dims = desc.getDims();
std::cout << "Input: '" << it.first << "' precison=" << desc.getPrecision() << " dims=" << dims.size() << " [";
std::cout << "Input: '" << it.first << "' precision=" << desc.getPrecision() << " dims=" << dims.size() << " [";
for (auto d : dims)
std::cout << " " << d;
std::cout << "] ocv_mat=" << inputsMap[it.first].size << " of " << typeToString(inputsMap[it.first].type()) << std::endl;
@@ -308,7 +308,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
if (cvtest::debugLevel > 0)
{
const std::vector<size_t>& dims = desc.getDims();
std::cout << "Output: '" << it.first << "' precison=" << desc.getPrecision() << " dims=" << dims.size() << " [";
std::cout << "Output: '" << it.first << "' precision=" << desc.getPrecision() << " dims=" << dims.size() << " [";
for (auto d : dims)
std::cout << " " << d;
std::cout << "] ocv_mat=" << outputsMap[it.first].size << " of " << typeToString(outputsMap[it.first].type()) << std::endl;


@@ -33,7 +33,7 @@ private:
double highfreq = sample_rate / 2;
public:
- // Mel filterbanks preperation
+ // Mel filterbanks preparation
double hz_to_mel(double frequencies)
{
//Converts frequencies from hz to mel scale
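For reference, one common hz-to-mel convention is the HTK formula below; note this is an assumption for illustration, since librosa-style pipelines (which this class mirrors) default to the Slaney variant, which is linear below 1 kHz and logarithmic above.

```
#include <cmath>
#include <cstdio>

// HTK convention: mel = 2595 * log10(1 + hz / 700)
double hz_to_mel_htk(double hz)  { return 2595.0 * std::log10(1.0 + hz / 700.0); }
double mel_to_hz_htk(double mel) { return 700.0 * (std::pow(10.0, mel / 2595.0) - 1.0); }

int main()
{
    std::printf("1000 Hz ~= %.1f mel\n", hz_to_mel_htk(1000.0));  // close to 1000 mel by design
    return 0;
}
```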
@@ -149,7 +149,7 @@ public:
return weights;
}
- // STFT preperation
+ // STFT preparation
vector<double> pad_window_center(vector<double>&data, int size)
{
// Pad the window out to n_fft size
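A minimal sketch of the centering described here, assuming the window is never longer than n_fft: place the window in the middle of an n_fft-length buffer and zero-fill both sides.

```
#include <cstddef>
#include <vector>

std::vector<double> pad_window_center_sketch(const std::vector<double>& window, int n_fft)
{
    std::vector<double> padded(static_cast<std::size_t>(n_fft), 0.0);
    const std::size_t left = (padded.size() - window.size()) / 2;  // zeros on the left
    for (std::size_t i = 0; i < window.size(); ++i)
        padded[left + i] = window[i];                              // window in the middle
    return padded;                                                 // zeros fill the right
}
```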


@@ -44,7 +44,7 @@ import os
model.graph.initializer.insert(i,init)
```
- 6. Add an additional reshape node to handle the inconsistant input from python and c++ of openCV.
+ 6. Add an additional reshape node to handle the inconsistent input from python and c++ of openCV.
see https://github.com/opencv/opencv/issues/19091
Make & insert a new node with 'Reshape' operation & required initializer
```
@@ -256,7 +256,7 @@ class FilterbankFeatures:
weights *= enorm[:, np.newaxis]
return weights
- # STFT preperation
+ # STFT preparation
def pad_window_center(self, data, size, axis=-1, **kwargs):
'''
Centers the data and pads.
@@ -329,7 +329,7 @@ class FilterbankFeatures:
then padded with zeros to match n_fft
fft_window : a vector or array of length `n_fft` having values computed by a
window function
- pad_mode : mode while padding the singnal
+ pad_mode : mode while padding the signal
return_complex : returns array with complex data type if `True`
return : Matrix of short-term Fourier transform coefficients.
'''
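A naive reference STFT sketch in C++ (direct DFT, no FFT library, no centering or pad_mode handling) that matches the description above in spirit; the parameter names follow the docstring, but the implementation details are assumptions.

```
#include <cmath>
#include <complex>
#include <vector>

// Returns frames[t][k]: DFT coefficient of bin k for the frame starting at t * hop_length.
std::vector<std::vector<std::complex<double>>>
stft_naive(const std::vector<double>& signal,
           const std::vector<double>& fft_window,   // length n_fft
           int n_fft, int hop_length)
{
    const double pi = 3.14159265358979323846;
    std::vector<std::vector<std::complex<double>>> frames;

    for (std::size_t start = 0; start + n_fft <= signal.size(); start += hop_length)
    {
        std::vector<std::complex<double>> spectrum(n_fft / 2 + 1);
        for (int k = 0; k <= n_fft / 2; ++k)
        {
            std::complex<double> acc(0.0, 0.0);
            for (int n = 0; n < n_fft; ++n)
            {
                const double x = signal[start + n] * fft_window[n];  // windowed sample
                const double angle = -2.0 * pi * k * n / n_fft;
                acc += x * std::complex<double>(std::cos(angle), std::sin(angle));
            }
            spectrum[k] = acc;
        }
        frames.push_back(spectrum);
    }
    return frames;
}
```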