dnn: fix various dnn related typos
Fixes source comments and documentation related to dnn code.
commit 8e8e4bbabc
parent 90671233c6
@@ -389,7 +389,7 @@ CV__DNN_INLINE_NS_BEGIN


     /**
-     * @brief "Deattaches" all the layers, attached to particular layer.
+     * @brief "Detaches" all the layers, attached to particular layer.
      */
     virtual void unsetAttached();

@@ -1579,7 +1579,7 @@ public:
      * - top-right
      * - bottom-right
      *
-     * Use cv::getPerspectiveTransform function to retrive image region without perspective transformations.
+     * Use cv::getPerspectiveTransform function to retrieve image region without perspective transformations.
      *
      * @note If DL model doesn't support that kind of output then result may be derived from detectTextRectangles() output.
      *
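The doc comment above points at cv::getPerspectiveTransform for rectifying the detected quadrangle. A minimal sketch of that step, assuming corners ordered top-left, top-right, bottom-right, bottom-left (the exact order is defined by the detection API, not by this hunk), with illustrative names:

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// Warp a detected text quadrangle to an axis-aligned patch of size outSize.
cv::Mat rectifyTextRegion(const cv::Mat& frame,
                          const std::vector<cv::Point2f>& quad, // 4 corners
                          cv::Size outSize)
{
    CV_Assert(quad.size() == 4);
    const std::vector<cv::Point2f> dst = {
        {0.f, 0.f},                                             // top-left
        {(float)outSize.width - 1, 0.f},                        // top-right
        {(float)outSize.width - 1, (float)outSize.height - 1},  // bottom-right
        {0.f, (float)outSize.height - 1}                        // bottom-left
    };
    cv::Mat M = cv::getPerspectiveTransform(quad, dst);
    cv::Mat rectified;
    cv::warpPerspective(frame, rectified, M, outSize);
    return rectified;
}
```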
@@ -100,7 +100,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(output.rank() == input.rank());
         CV_Assert(output_axis_offset < output.get_axis_size(axis));

-        /* if axes preceeding the concat axis are all singleton, the concat blocks are contiguous
+        /* if axes preceding the concat axis are all singleton, the concat blocks are contiguous
          * in the output and we can copy each block directly
          */
         if (output.size_range(0, axis) == 1)
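The comment being fixed states the key invariant: when every axis before the concat axis is singleton, each input forms one contiguous block in the output. A host-side sketch of the resulting fast path, with assumed names (the real cuda4dnn code copies device memory, not host memory):

```cpp
#include <cstddef>
#include <cstring>
#include <vector>

// Concatenate inputs whose pre-concat axes are all singleton: each input
// is a single contiguous block, so one bulk copy per input suffices.
void concat_contiguous(float* output,
                       const std::vector<const float*>& inputs,
                       const std::vector<std::size_t>& block_sizes) // in elements
{
    std::size_t offset = 0; // running write position in the output
    for (std::size_t i = 0; i < inputs.size(); i++) {
        std::memcpy(output + offset, inputs[i], block_sizes[i] * sizeof(float));
        offset += block_sizes[i];
    }
}
```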
@@ -33,7 +33,7 @@
 * template <class T, std::size_t Rank>
 * void launch_some_kernel(...);
 *
- * // creates the dispatcher named "some_dispatcher" which invokves the correct instantiation of "launch_some_kernel"
+ * // creates the dispatcher named "some_dispatcher" which invokes the correct instantiation of "launch_some_kernel"
 * GENERATE_KERNEL_DISPATCHER(some_dispatcher, launch_some_kernel);
 *
 * // internal API function
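For readers unfamiliar with the macro, a hedged sketch of what a rank dispatcher like GENERATE_KERNEL_DISPATCHER could expand to (illustrative names, C++17; the actual macro's expansion may differ):

```cpp
#include <cstddef>
#include <utility>

// rank-specialized kernel launcher; the real one would launch a CUDA grid
template <class T, std::size_t Rank>
void launch_some_kernel(const T* /*data*/)
{
    // ... kernel launch for a tensor of compile-time rank `Rank` ...
}

// maps a runtime rank to the matching compile-time instantiation
template <class T, std::size_t Rank, class... Args>
bool some_dispatcher(std::size_t runtime_rank, Args&&... args)
{
    if (runtime_rank == Rank) {
        launch_some_kernel<T, Rank>(std::forward<Args>(args)...);
        return true;
    }
    if constexpr (Rank > 1)
        return some_dispatcher<T, Rank - 1>(runtime_rank,
                                            std::forward<Args>(args)...);
    return false; // unsupported rank
}

// usage: some_dispatcher<float, 5>(tensor_rank, ptr); // tries ranks 5..1
```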
@@ -72,7 +72,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         __syncthreads();

         /* We interchange `threadIdx.x` and `threadIdx.y` so that consecutive output indices map to
-         * consecutive threads. This would allow writes across threds in a warp to be coalesced.
+         * consecutive threads. This would allow writes across threads in a warp to be coalesced.
          */
         const index_type out_x = blockIdx.y * TILE_SIZE + threadIdx.x;
         const index_type out_y_begin = blockIdx.x * TILE_SIZE + threadIdx.y;
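The coalescing trick described in the comment is the classic shared-memory tiled transpose. A standalone CUDA sketch of the same idea (assumed TILE_SIZE and names; not the cuda4dnn kernel itself):

```cpp
constexpr int TILE_SIZE = 32;

__global__ void transpose_tile(float* out, const float* in, int rows, int cols)
{
    __shared__ float tile[TILE_SIZE][TILE_SIZE + 1]; // +1 avoids bank conflicts

    // coalesced read: threadIdx.x walks the input's fastest-varying axis
    int in_x = blockIdx.x * TILE_SIZE + threadIdx.x;
    int in_y = blockIdx.y * TILE_SIZE + threadIdx.y;
    if (in_x < cols && in_y < rows)
        tile[threadIdx.y][threadIdx.x] = in[in_y * cols + in_x];

    __syncthreads();

    // threadIdx.x and threadIdx.y swap roles so the write is coalesced too
    int out_x = blockIdx.y * TILE_SIZE + threadIdx.x;
    int out_y = blockIdx.x * TILE_SIZE + threadIdx.y;
    if (out_x < rows && out_y < cols)
        out[out_y * rows + out_x] = tile[threadIdx.x][threadIdx.y];
}
```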
@@ -156,7 +156,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
     * tensor indices be [o1, o2, ...]. The permutation operation essentially copies items
     * from the input tensor to new locations in the output tensor as dictated by the indices.
     *
-    * If the size of the nth axis (say i2) of the input is one the input and output indicies for
+    * If the size of the nth axis (say i2) of the input is one the input and output indices for
     * all the elements will be of the form be [i1, 0, ...] and [..., 0, ...] respectively.
     * The index does not contribute to the element's address calculation and hence would give
     * identical result if it weren't there.
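The claim in the comment (a singleton axis never affects the address) is easy to verify with the stride arithmetic; a tiny illustration with an assumed row-major shape [d0, 1, d2]:

```cpp
#include <cstddef>

// strides of shape [d0, 1, d2] are {d2, d2, 1}; on the size-1 axis the
// index i1 can only be 0, so its term i1 * d2 is always 0 and the axis
// can be dropped without changing any element's flat offset
std::size_t flat_offset(std::size_t i0, std::size_t i1, std::size_t i2,
                        std::size_t d2)
{
    return i0 * d2 + i1 * d2 + i2;
}
```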
@@ -159,7 +159,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

         /* We can do a copy if the reduced rank is two and only the first axis is sliced.
          * The general requirement is that only one axis is sliced and all the axes that
-         * preceed the sliced axis are singleton. However, the reductions above will remove
+         * precede the sliced axis are singleton. However, the reductions above will remove
          * all the leading singleton axes and merge the trailing unsliced axes into one, or
          * zero if there are no trailing unsliced axes. The latter is handled separately.
          */
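As with the concat fast path earlier, the condition reduces slicing to a single contiguous copy. A hedged host-side sketch of the two-axis case (assumed names):

```cpp
#include <cstddef>
#include <cstring>

// reduced view is [rows, row_size]; slicing only the first axis selects a
// contiguous run of rows, so one copy replaces a strided slice kernel
void slice_first_axis(float* output, const float* input,
                      std::size_t first_row, std::size_t out_rows,
                      std::size_t row_size /* merged trailing axes */)
{
    std::memcpy(output, input + first_row * row_size,
                out_rows * row_size * sizeof(float));
}
```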
@@ -68,7 +68,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
                 }
             }
         });
-        /* std::shared_ptr<T>::reset invokves the deleter if an exception occurs; hence, we don't
+        /* std::shared_ptr<T>::reset invokes the deleter if an exception occurs; hence, we don't
          * need to have a try-catch block to free the allocated device memory
          */

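The guarantee the comment relies on is part of the standard: std::shared_ptr::reset(ptr, deleter) calls deleter(ptr) if it throws (for example, if the control-block allocation fails). A small self-contained illustration, with malloc/free standing in for the device allocator:

```cpp
#include <cstddef>
#include <cstdlib>
#include <memory>

// stand-ins for the device allocator (the real code would use CUDA calls)
static void* device_alloc(std::size_t bytes) { return std::malloc(bytes); }
static void device_free(void* p) { std::free(p); }

std::shared_ptr<void> make_device_block(std::size_t bytes)
{
    void* raw = device_alloc(bytes);
    std::shared_ptr<void> block;
    // if reset throws before taking ownership, the deleter still runs on
    // raw, so no try/catch is needed to avoid leaking the allocation
    block.reset(raw, [](void* p) { device_free(p); });
    return block;
}
```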
@@ -147,7 +147,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
         /* host const void pointer to const void device pointer */
         CUDA4DNN_HOST_DEVICE explicit DevicePtr(pointer ptr_) noexcept : ptr{ ptr_ } { }

-        /* allow any device pointer to be implicitly convereted to void device pointer */
+        /* allow any device pointer to be implicitly converted to void device pointer */
         template <class T>
         CUDA4DNN_HOST_DEVICE DevicePtr(DevicePtr<T> ptr_) noexcept : ptr{ ptr_.get() } { }

@@ -199,7 +199,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
         /* host pointer to device pointer */
         CUDA4DNN_HOST_DEVICE explicit DevicePtr(pointer ptr_) noexcept : ptr{ ptr_ } { }

-        /* allow any device pointer to mutable memory to be implicitly convereted to void device pointer */
+        /* allow any device pointer to mutable memory to be implicitly converted to void device pointer */
         template <class T, typename std::enable_if<!std::is_const<T>::value, bool>::type = false>
         CUDA4DNN_HOST_DEVICE DevicePtr(DevicePtr<T> ptr_) noexcept : ptr { ptr_.get() } { }

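The two hunks above document the conversion rules for the const-void and void specializations of DevicePtr. A reduced sketch of the pattern (illustrative, not the csl header): the enable_if on the void specialization rejects pointers to const memory at overload resolution.

```cpp
#include <type_traits>

template <class T>
class DevicePtr {
public:
    explicit DevicePtr(T* p) noexcept : ptr(p) { }
    T* get() const noexcept { return ptr; }
private:
    T* ptr;
};

template <>
class DevicePtr<void> {
public:
    explicit DevicePtr(void* p) noexcept : ptr(p) { }
    /* only device pointers to mutable memory may convert implicitly */
    template <class T,
              typename std::enable_if<!std::is_const<T>::value, bool>::type = false>
    DevicePtr(DevicePtr<T> other) noexcept : ptr(other.get()) { }
    void* get() const noexcept { return ptr; }
private:
    void* ptr;
};

// DevicePtr<void> v{DevicePtr<float>{...}};       // OK
// DevicePtr<void> w{DevicePtr<const float>{...}}; // rejected by enable_if
```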
@@ -791,7 +791,7 @@ namespace cv {
         if (layers_vec.size() > 1)
         {
             // layer ids in layers_vec - inputs of Slice layers
-            // after adding offset to layers_vec: layer ids - ouputs of Slice layers
+            // after adding offset to layers_vec: layer ids - outputs of Slice layers
             for (size_t k = 0; k < layers_vec.size(); ++k)
                 layers_vec[k] += layers_vec.size();

@@ -799,7 +799,7 @@ struct TextRecognitionModel_Impl : public Model::Impl

     virtual
     std::string ctcPrefixBeamSearchDecode(const Mat& prediction) {
-        // CTC prefix beam seach decode.
+        // CTC prefix beam search decode.
         // For more detail, refer to:
         // https://distill.pub/2017/ctc/#inference
         // https://gist.github.com/awni/56369a90d03953e370f3964c826ed4b0i
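The links in the hunk describe CTC prefix beam search; that algorithm is too long to sketch here, but the simpler greedy (best-path) CTC decode shows the core idea of collapsing per-timestep outputs. A hedged sketch, assuming a T x C score matrix with the blank at class 0 (the blank index is model-specific):

```cpp
#include <string>
#include <vector>

// greedy CTC decode: argmax per time step, then collapse repeats and drop
// blanks; prefix beam search instead tracks several candidate prefixes
std::string ctcGreedyDecode(const std::vector<std::vector<float>>& prediction,
                            const std::string& vocabulary) // class i -> vocabulary[i-1]
{
    std::string result;
    int prev = 0; // previous argmax; 0 is the blank class here
    for (const auto& scores : prediction) {
        int best = 0;
        for (int c = 1; c < (int)scores.size(); c++)
            if (scores[c] > scores[best]) best = c;
        if (best != 0 && best != prev) // skip blanks and repeated symbols
            result += vocabulary[best - 1];
        prev = best;
    }
    return result;
}
```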
@@ -331,7 +331,7 @@ teng_graph_t tengine_init(const char* layer_name, float* input_, int inch, int g
         teg_weight = kernel_;
     }

-    /* initial the resoruce of tengine */
+    /* initial the resource of tengine */
     if(false == tengine_init_flag)
     {
         init_tengine();
@@ -290,7 +290,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     if (cvtest::debugLevel > 0)
     {
         const std::vector<size_t>& dims = desc.getDims();
-        std::cout << "Input: '" << it.first << "' precison=" << desc.getPrecision() << " dims=" << dims.size() << " [";
+        std::cout << "Input: '" << it.first << "' precision=" << desc.getPrecision() << " dims=" << dims.size() << " [";
         for (auto d : dims)
             std::cout << " " << d;
         std::cout << "] ocv_mat=" << inputsMap[it.first].size << " of " << typeToString(inputsMap[it.first].type()) << std::endl;
@@ -308,7 +308,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     if (cvtest::debugLevel > 0)
     {
         const std::vector<size_t>& dims = desc.getDims();
-        std::cout << "Output: '" << it.first << "' precison=" << desc.getPrecision() << " dims=" << dims.size() << " [";
+        std::cout << "Output: '" << it.first << "' precision=" << desc.getPrecision() << " dims=" << dims.size() << " [";
         for (auto d : dims)
             std::cout << " " << d;
         std::cout << "] ocv_mat=" << outputsMap[it.first].size << " of " << typeToString(outputsMap[it.first].type()) << std::endl;
@@ -33,7 +33,7 @@ private:
     double highfreq = sample_rate / 2;

 public:
-    // Mel filterbanks preperation
+    // Mel filterbanks preparation
     double hz_to_mel(double frequencies)
     {
         //Converts frequencies from hz to mel scale
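hz_to_mel is a fixed formula; for reference, the HTK convention is sketched below. This is a hedged aside: librosa-style filterbank code, which this sample resembles, defaults to the Slaney variant (linear below 1 kHz), so the exact curve used here may differ.

```cpp
#include <cmath>

// HTK mel scale: mel = 2595 * log10(1 + hz / 700)
double hz_to_mel_htk(double hz)
{
    return 2595.0 * std::log10(1.0 + hz / 700.0);
}

// inverse mapping, useful when placing filterbank edges back in Hz
double mel_to_hz_htk(double mel)
{
    return 700.0 * (std::pow(10.0, mel / 2595.0) - 1.0);
}
```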
@@ -149,7 +149,7 @@ public:
         return weights;
     }

-    // STFT preperation
+    // STFT preparation
     vector<double> pad_window_center(vector<double>&data, int size)
     {
         // Pad the window out to n_fft size
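The padding step named in this hunk is straightforward; a self-contained sketch of the assumed semantics (center the window in an n_fft-length buffer and zero-fill both sides):

```cpp
#include <algorithm>
#include <stdexcept>
#include <vector>

std::vector<double> pad_window_center(const std::vector<double>& data, int size)
{
    const int n = static_cast<int>(data.size());
    if (size < n)
        throw std::invalid_argument("target size smaller than window");
    const int lpad = (size - n) / 2;       // zeros placed on the left
    std::vector<double> padded(size, 0.0); // zero-initialized buffer
    std::copy(data.begin(), data.end(), padded.begin() + lpad);
    return padded;
}
```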
@@ -44,7 +44,7 @@ import os
    model.graph.initializer.insert(i,init)
    ```

-6. Add an additional reshape node to handle the inconsistant input from python and c++ of openCV.
+6. Add an additional reshape node to handle the inconsistent input from python and c++ of openCV.
    see https://github.com/opencv/opencv/issues/19091
    Make & insert a new node with 'Reshape' operation & required initializer
    ```
@@ -256,7 +256,7 @@ class FilterbankFeatures:
         weights *= enorm[:, np.newaxis]
         return weights

-    # STFT preperation
+    # STFT preparation
     def pad_window_center(self, data, size, axis=-1, **kwargs):
         '''
         Centers the data and pads.
@@ -329,7 +329,7 @@ class FilterbankFeatures:
             then padded with zeros to match n_fft
         fft_window : a vector or array of length `n_fft` having values computed by a
             window function
-        pad_mode : mode while padding the singnal
+        pad_mode : mode while padding the signal
         return_complex : returns array with complex data type if `True`
         return : Matrix of short-term Fourier transform coefficients.
         '''
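For reference, the quantity this docstring describes is the standard short-term Fourier transform; a textbook definition (not extracted from this file), with frame index m, bin k, hop length H, and window w of length N = n_fft:

```latex
X[m, k] = \sum_{n=0}^{N-1} w[n] \, x[n + mH] \, e^{-2\pi i k n / N}
```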