Merge remote-tracking branch 'upstream/3.4' into merge-3.4

This commit is contained in:
Alexander Alekhin 2018-04-24 18:13:06 +03:00
commit cd2b188c9a
78 changed files with 2011 additions and 931 deletions

View File

@ -469,8 +469,9 @@ class TextFormat::Parser::ParserImpl {
"\" has no field named \"" + field_name + "\"."); "\" has no field named \"" + field_name + "\".");
return false; return false;
} else { } else {
ReportWarning("Message type \"" + descriptor->full_name() + // No warnings to let user define custom layers (see https://github.com/opencv/opencv/pull/11129)
"\" has no field named \"" + field_name + "\"."); // ReportWarning("Message type \"" + descriptor->full_name() +
// "\" has no field named \"" + field_name + "\".");
} }
} }
} }
@ -485,10 +486,13 @@ class TextFormat::Parser::ParserImpl {
// start with "{" or "<" which indicates the beginning of a message body. // start with "{" or "<" which indicates the beginning of a message body.
// If there is no ":" or there is a "{" or "<" after ":", this field has // If there is no ":" or there is a "{" or "<" after ":", this field has
// to be a message or the input is ill-formed. // to be a message or the input is ill-formed.
UnknownFieldSet* unknown_fields = reflection->MutableUnknownFields(message);
if (TryConsume(":") && !LookingAt("{") && !LookingAt("<")) { if (TryConsume(":") && !LookingAt("{") && !LookingAt("<")) {
return SkipFieldValue(); UnknownFieldSet* unknown_field = unknown_fields->AddGroup(unknown_fields->field_count());
unknown_field->AddLengthDelimited(0, field_name); // Add a field's name.
return SkipFieldValue(unknown_field);
} else { } else {
return SkipFieldMessage(); return SkipFieldMessage(unknown_fields);
} }
} }
@ -571,7 +575,7 @@ label_skip_parsing:
} }
// Skips the next field including the field's name and value. // Skips the next field including the field's name and value.
bool SkipField() { bool SkipField(UnknownFieldSet* unknown_fields) {
string field_name; string field_name;
if (TryConsume("[")) { if (TryConsume("[")) {
// Extension name. // Extension name.
@ -588,9 +592,11 @@ label_skip_parsing:
// If there is no ":" or there is a "{" or "<" after ":", this field has // If there is no ":" or there is a "{" or "<" after ":", this field has
// to be a message or the input is ill-formed. // to be a message or the input is ill-formed.
if (TryConsume(":") && !LookingAt("{") && !LookingAt("<")) { if (TryConsume(":") && !LookingAt("{") && !LookingAt("<")) {
DO(SkipFieldValue()); UnknownFieldSet* unknown_field = unknown_fields->AddGroup(unknown_fields->field_count());
unknown_field->AddLengthDelimited(0, field_name); // Add a field's name.
DO(SkipFieldValue(unknown_field));
} else { } else {
DO(SkipFieldMessage()); DO(SkipFieldMessage(unknown_fields));
} }
// For historical reasons, fields may optionally be separated by commas or // For historical reasons, fields may optionally be separated by commas or
// semicolons. // semicolons.
@ -625,11 +631,11 @@ label_skip_parsing:
// Skips the whole body of a message including the beginning delimiter and // Skips the whole body of a message including the beginning delimiter and
// the ending delimiter. // the ending delimiter.
bool SkipFieldMessage() { bool SkipFieldMessage(UnknownFieldSet* unknown_fields) {
string delimiter; string delimiter;
DO(ConsumeMessageDelimiter(&delimiter)); DO(ConsumeMessageDelimiter(&delimiter));
while (!LookingAt(">") && !LookingAt("}")) { while (!LookingAt(">") && !LookingAt("}")) {
DO(SkipField()); DO(SkipField(unknown_fields));
} }
DO(Consume(delimiter)); DO(Consume(delimiter));
return true; return true;
@ -769,7 +775,7 @@ label_skip_parsing:
return true; return true;
} }
bool SkipFieldValue() { bool SkipFieldValue(UnknownFieldSet* unknown_field) {
if (LookingAtType(io::Tokenizer::TYPE_STRING)) { if (LookingAtType(io::Tokenizer::TYPE_STRING)) {
while (LookingAtType(io::Tokenizer::TYPE_STRING)) { while (LookingAtType(io::Tokenizer::TYPE_STRING)) {
tokenizer_.Next(); tokenizer_.Next();
@ -779,9 +785,9 @@ label_skip_parsing:
if (TryConsume("[")) { if (TryConsume("[")) {
while (true) { while (true) {
if (!LookingAt("{") && !LookingAt("<")) { if (!LookingAt("{") && !LookingAt("<")) {
DO(SkipFieldValue()); DO(SkipFieldValue(unknown_field));
} else { } else {
DO(SkipFieldMessage()); DO(SkipFieldMessage(unknown_field));
} }
if (TryConsume("]")) { if (TryConsume("]")) {
break; break;
@ -833,6 +839,8 @@ label_skip_parsing:
return false; return false;
} }
} }
// Use a tag 1 because tag 0 is used for field's name.
unknown_field->AddLengthDelimited(1, tokenizer_.current().text);
tokenizer_.Next(); tokenizer_.Next();
return true; return true;
} }
@ -1298,13 +1306,13 @@ class TextFormat::Printer::TextGenerator
TextFormat::Finder::~Finder() { TextFormat::Finder::~Finder() {
} }
TextFormat::Parser::Parser() TextFormat::Parser::Parser(bool allow_unknown_field)
: error_collector_(NULL), : error_collector_(NULL),
finder_(NULL), finder_(NULL),
parse_info_tree_(NULL), parse_info_tree_(NULL),
allow_partial_(false), allow_partial_(false),
allow_case_insensitive_field_(false), allow_case_insensitive_field_(false),
allow_unknown_field_(false), allow_unknown_field_(allow_unknown_field),
allow_unknown_enum_(false), allow_unknown_enum_(false),
allow_field_number_(false), allow_field_number_(false),
allow_relaxed_whitespace_(false), allow_relaxed_whitespace_(false),

View File

@ -457,7 +457,7 @@ class LIBPROTOBUF_EXPORT TextFormat {
// For more control over parsing, use this class. // For more control over parsing, use this class.
class LIBPROTOBUF_EXPORT Parser { class LIBPROTOBUF_EXPORT Parser {
public: public:
Parser(); Parser(bool allow_unknown_field = false);
~Parser(); ~Parser();
// Like TextFormat::Parse(). // Like TextFormat::Parse().

View File

@ -258,7 +258,10 @@ if(X86 OR X86_64)
endif() endif()
if(NOT DEFINED CPU_BASELINE) if(NOT DEFINED CPU_BASELINE)
if(X86_64) if(APPLE)
# MacOS X has limited set of possible supported H/W, so compiler is configured well
set(CPU_BASELINE "DETECT" CACHE STRING "${HELP_CPU_BASELINE}")
elseif(X86_64)
set(CPU_BASELINE "SSE3" CACHE STRING "${HELP_CPU_BASELINE}") set(CPU_BASELINE "SSE3" CACHE STRING "${HELP_CPU_BASELINE}")
else() else()
set(CPU_BASELINE "SSE2" CACHE STRING "${HELP_CPU_BASELINE}") set(CPU_BASELINE "SSE2" CACHE STRING "${HELP_CPU_BASELINE}")

View File

@ -120,7 +120,9 @@ if(CV_GCC OR CV_CLANG)
add_extra_compiler_option(-Wno-unnamed-type-template-args) add_extra_compiler_option(-Wno-unnamed-type-template-args)
add_extra_compiler_option(-Wno-comment) add_extra_compiler_option(-Wno-comment)
if(NOT OPENCV_SKIP_IMPLICIT_FALLTHROUGH if(NOT OPENCV_SKIP_IMPLICIT_FALLTHROUGH
AND NOT " ${CMAKE_CXX_FLAGS} ${OPENCV_EXTRA_FLAGS} ${OPENCV_EXTRA_CXX_FLAGS}" MATCHES "implicit-fallthrough") AND NOT " ${CMAKE_CXX_FLAGS} ${OPENCV_EXTRA_FLAGS} ${OPENCV_EXTRA_CXX_FLAGS}" MATCHES "implicit-fallthrough"
AND (CV_GCC AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0.0)
)
add_extra_compiler_option(-Wimplicit-fallthrough=3) add_extra_compiler_option(-Wimplicit-fallthrough=3)
endif() endif()
if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 7.2.0) if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 7.2.0)

View File

@ -4,12 +4,14 @@
# define CV_POPCNT_U64 _mm_popcnt_u64 # define CV_POPCNT_U64 _mm_popcnt_u64
# endif # endif
# define CV_POPCNT_U32 _mm_popcnt_u32 # define CV_POPCNT_U32 _mm_popcnt_u32
#else #elif defined(__POPCNT__)
# include <popcntintrin.h> # include <popcntintrin.h>
# if defined(__x86_64__) # if defined(__x86_64__)
# define CV_POPCNT_U64 __builtin_popcountll # define CV_POPCNT_U64 __builtin_popcountll
# endif # endif
# define CV_POPCNT_U32 __builtin_popcount # define CV_POPCNT_U32 __builtin_popcount
#else
# error "__POPCNT__ is not defined by compiler"
#endif #endif
int main() int main()

View File

@ -0,0 +1,192 @@
# Custom deep learning layers support {#tutorial_dnn_custom_layers}
## Introduction
Deep learning is a fast growing area. The new approaches to build neural networks
usually introduce new types of layers. They could be modifications of existing
ones or implement outstanding researching ideas.
OpenCV gives an opportunity to import and run networks from different deep learning
frameworks. A number of the most popular layers are supported. However, you may face
a problem that your network cannot be imported using OpenCV because of unimplemented layers.
The first solution is to create a feature request at https://github.com/opencv/opencv/issues
mentioning details such as the source of the model and the type of the new layer. A new layer could
be implemented if OpenCV community shares this need.
The second way is to define a **custom layer** so OpenCV's deep learning engine
will know how to use it. This tutorial is dedicated to showing you the process of
customizing the import of deep learning models.
## Define a custom layer in C++
Deep learning layer is a building block of network's pipeline.
It has connections to **input blobs** and produces results to **output blobs**.
There are trained **weights** and **hyper-parameters**.
Layers' names, types, weights and hyper-parameters are stored in files generated by
native frameworks during training. If OpenCV meets an unknown layer type it throws an
exception trying to read a model:
```
Unspecified error: Can't create layer "layer_name" of type "MyType" in function getLayerInstance
```
To import the model correctly you have to derive a class from cv::dnn::Layer with
the following methods:
@snippet dnn/custom_layers.cpp A custom layer interface
And register it before the import:
@snippet dnn/custom_layers.cpp Register a custom layer
@note `MyType` is a type of unimplemented layer from the thrown exception.
Let's see what all the methods do:
- Constructor
@snippet dnn/custom_layers.cpp MyLayer::MyLayer
Retrieves hyper-parameters from cv::dnn::LayerParams. If your layer has trainable
weights they will be already stored in the Layer's member cv::dnn::Layer::blobs.
- A static method `create`
@snippet dnn/custom_layers.cpp MyLayer::create
This method should create an instance of your layer and return a cv::Ptr to it.
- Output blobs' shape computation
@snippet dnn/custom_layers.cpp MyLayer::getMemoryShapes
Returns the layer's output shapes depending on the input shapes. You may request extra
memory using `internals`.
- Run a layer
@snippet dnn/custom_layers.cpp MyLayer::forward
Implement a layer's logic here. Compute outputs for given inputs.
@note OpenCV manages memory allocated for layers. In most cases the same memory
can be reused between layers. So your `forward` implementation should not rely on
the second invocation of `forward` having the same data at `outputs` and `internals`.
- Optional `finalize` method
@snippet dnn/custom_layers.cpp MyLayer::finalize
The chain of methods is the following: the OpenCV deep learning engine calls the `create`
method once, then calls `getMemoryShapes` for every created layer, and then you
can make some preparations depending on the known input dimensions at cv::dnn::Layer::finalize.
After the network is initialized, only the `forward` method is called for every network input.
@note Varying input blobs' sizes such as height, width or batch size makes OpenCV
reallocate all the internal memory. That leads to efficiency gaps. Try to initialize
and deploy models using a fixed batch size and image dimensions.
## Example: custom layer from Caffe
Let's create a custom layer `Interp` from https://github.com/cdmh/deeplab-public.
It's just a simple resize that takes an input blob of size `N x C x Hi x Wi` and returns
an output blob of size `N x C x Ho x Wo` where `N` is a batch size, `C` is a number of channels,
`Hi x Wi` and `Ho x Wo` are input and output `height x width` correspondingly.
This layer has no trainable weights but it has hyper-parameters to specify an output size.
For example,
~~~~~~~~~~~~~
layer {
name: "output"
type: "Interp"
bottom: "input"
top: "output"
interp_param {
height: 9
width: 8
}
}
~~~~~~~~~~~~~
This way our implementation can look like:
@snippet dnn/custom_layers.cpp InterpLayer
Next we need to register a new layer type and try to import the model.
@snippet dnn/custom_layers.cpp Register InterpLayer
## Example: custom layer from TensorFlow
This is an example of how to import a network with [tf.image.resize_bilinear](https://www.tensorflow.org/versions/master/api_docs/python/tf/image/resize_bilinear)
operation. This is also a resize but with an implementation different from OpenCV's or `Interp` above.
Let's create a single layer network:
~~~~~~~~~~~~~{.py}
inp = tf.placeholder(tf.float32, [2, 3, 4, 5], 'input')
resized = tf.image.resize_bilinear(inp, size=[9, 8], name='resize_bilinear')
~~~~~~~~~~~~~
OpenCV sees TensorFlow's graph in the following way:
```
node {
name: "input"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
}
node {
name: "resize_bilinear/size"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\t\000\000\000\010\000\000\000"
}
}
}
}
node {
name: "resize_bilinear"
op: "ResizeBilinear"
input: "input:0"
input: "resize_bilinear/size"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "align_corners"
value {
b: false
}
}
}
library {
}
```
Custom layer import from TensorFlow is designed to put all of a layer's `attr` entries into
cv::dnn::LayerParams, but input `Const` blobs into cv::dnn::Layer::blobs.
In our case resize's output shape will be stored in layer's `blobs[0]`.
@snippet dnn/custom_layers.cpp ResizeBilinearLayer
Next we register a layer and try to import the model.
@snippet dnn/custom_layers.cpp Register ResizeBilinearLayer

View File

@ -48,3 +48,11 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
*Author:* Dmitry Kurtaev *Author:* Dmitry Kurtaev
In this tutorial we'll run deep learning models in browser using OpenCV.js. In this tutorial we'll run deep learning models in browser using OpenCV.js.
- @subpage tutorial_dnn_custom_layers
*Compatibility:* \> OpenCV 3.4.1
*Author:* Dmitry Kurtaev
How to define custom layers to import networks.

View File

@ -751,12 +751,8 @@ bool CirclesGridFinder::isDetectionCorrect()
} }
return (vertices.size() == largeHeight * largeWidth + smallHeight * smallWidth); return (vertices.size() == largeHeight * largeWidth + smallHeight * smallWidth);
} }
default:
CV_Error(0, "Unknown pattern type");
} }
CV_Error(Error::StsBadArg, "Unknown pattern type");
return false;
} }
void CirclesGridFinder::findMCS(const std::vector<Point2f> &basis, std::vector<Graph> &basisGraphs) void CirclesGridFinder::findMCS(const std::vector<Point2f> &basis, std::vector<Graph> &basisGraphs)

View File

@ -371,7 +371,7 @@ It is possible to alternate error processing by using redirectError().
@param _func - function name. Available only when the compiler supports getting it @param _func - function name. Available only when the compiler supports getting it
@param _file - source file name where the error has occurred @param _file - source file name where the error has occurred
@param _line - line number in the source file where the error has occurred @param _line - line number in the source file where the error has occurred
@see CV_Error, CV_Error_, CV_ErrorNoReturn, CV_ErrorNoReturn_, CV_Assert, CV_DbgAssert @see CV_Error, CV_Error_, CV_Assert, CV_DbgAssert
*/ */
CV_EXPORTS void error(int _code, const String& _err, const char* _func, const char* _file, int _line); CV_EXPORTS void error(int _code, const String& _err, const char* _func, const char* _file, int _line);
@ -414,8 +414,6 @@ CV_INLINE CV_NORETURN void errorNoReturn(int _code, const String& _err, const ch
// We need to use simplified definition for them. // We need to use simplified definition for them.
#define CV_Error(...) do { abort(); } while (0) #define CV_Error(...) do { abort(); } while (0)
#define CV_Error_( code, args ) do { cv::format args; abort(); } while (0) #define CV_Error_( code, args ) do { cv::format args; abort(); } while (0)
#define CV_ErrorNoReturn(...) do { abort(); } while (0)
#define CV_ErrorNoReturn_(...) do { abort(); } while (0)
#define CV_Assert_1( expr ) do { if (!(expr)) abort(); } while (0) #define CV_Assert_1( expr ) do { if (!(expr)) abort(); } while (0)
#else // CV_STATIC_ANALYSIS #else // CV_STATIC_ANALYSIS
@ -446,22 +444,22 @@ for example:
*/ */
#define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ ) #define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ )
/** same as CV_Error(code,msg), but does not return */
#define CV_ErrorNoReturn( code, msg ) cv::errorNoReturn( code, msg, CV_Func, __FILE__, __LINE__ )
/** same as CV_Error_(code,args), but does not return */
#define CV_ErrorNoReturn_( code, args ) cv::errorNoReturn( code, cv::format args, CV_Func, __FILE__, __LINE__ )
#define CV_Assert_1( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ) #define CV_Assert_1( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )
//! @cond IGNORED //! @cond IGNORED
#define CV__ErrorNoReturn( code, msg ) cv::errorNoReturn( code, msg, CV_Func, __FILE__, __LINE__ )
#define CV__ErrorNoReturn_( code, args ) cv::errorNoReturn( code, cv::format args, CV_Func, __FILE__, __LINE__ )
#ifdef __OPENCV_BUILD #ifdef __OPENCV_BUILD
#undef CV_Error #undef CV_Error
#define CV_Error CV_ErrorNoReturn #define CV_Error CV__ErrorNoReturn
#undef CV_Error_ #undef CV_Error_
#define CV_Error_ CV_ErrorNoReturn_ #define CV_Error_ CV__ErrorNoReturn_
#undef CV_Assert_1 #undef CV_Assert_1
#define CV_Assert_1( expr ) if(!!(expr)) ; else cv::errorNoReturn( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ) #define CV_Assert_1( expr ) if(!!(expr)) ; else cv::errorNoReturn( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )
#else
// backward compatibility
#define CV_ErrorNoReturn CV__ErrorNoReturn
#define CV_ErrorNoReturn_ CV__ErrorNoReturn_
#endif #endif
//! @endcond //! @endcond

View File

@ -1042,13 +1042,16 @@ template<typename _Tp, int n> inline bool v_check_any(const v_reg<_Tp, n>& a)
return false; return false;
} }
/** @brief Bitwise select /** @brief Per-element select (blend operation)
Return value will be built by combining values a and b using the following scheme: Return value will be built by combining values _a_ and _b_ using the following scheme:
If the i-th bit in _mask_ is 1 result[i] = mask[i] ? a[i] : b[i];
select i-th bit from _a_
else @note: _mask_ element values are restricted to these values:
select i-th bit from _b_ */ - 0: select element from _b_
- 0xff/0xffff/etc: select element from _a_
(fully compatible with bitwise-based operator)
*/
template<typename _Tp, int n> inline v_reg<_Tp, n> v_select(const v_reg<_Tp, n>& mask, template<typename _Tp, int n> inline v_reg<_Tp, n> v_select(const v_reg<_Tp, n>& mask,
const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b)
{ {
@ -1058,8 +1061,8 @@ template<typename _Tp, int n> inline v_reg<_Tp, n> v_select(const v_reg<_Tp, n>&
for( int i = 0; i < n; i++ ) for( int i = 0; i < n; i++ )
{ {
int_type m = Traits::reinterpret_int(mask.s[i]); int_type m = Traits::reinterpret_int(mask.s[i]);
c.s[i] = Traits::reinterpret_from_int((Traits::reinterpret_int(a.s[i]) & m) CV_DbgAssert(m == 0 || m == (~(int_type)0)); // restrict mask values: 0 or 0xff/0xffff/etc
| (Traits::reinterpret_int(b.s[i]) & ~m)); c.s[i] = m ? a.s[i] : b.s[i];
} }
return c; return c;
} }

View File

@ -438,10 +438,14 @@ void v_rshr_pack_store(schar* ptr, const v_int16x8& a)
} }
// bit-wise "mask ? a : b" // byte-wise "mask ? a : b"
inline __m128i v_select_si128(__m128i mask, __m128i a, __m128i b) inline __m128i v_select_si128(__m128i mask, __m128i a, __m128i b)
{ {
#if CV_SSE4_1
return _mm_blendv_epi8(b, a, mask);
#else
return _mm_xor_si128(b, _mm_and_si128(_mm_xor_si128(a, b), mask)); return _mm_xor_si128(b, _mm_and_si128(_mm_xor_si128(a, b), mask));
#endif
} }
inline v_uint16x8 v_pack(const v_uint32x4& a, const v_uint32x4& b) inline v_uint16x8 v_pack(const v_uint32x4& a, const v_uint32x4& b)
@ -1403,6 +1407,26 @@ OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int32x4, epi8, v_packq_epi32, OPENCV_HAL_AND,
OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float32x4, ps, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 15, 15) OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float32x4, ps, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 15, 15)
OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float64x2, pd, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 3, 3) OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float64x2, pd, OPENCV_HAL_NOP, OPENCV_HAL_1ST, 3, 3)
#if CV_SSE4_1
#define OPENCV_HAL_IMPL_SSE_SELECT(_Tpvec, cast_ret, cast, suffix) \
inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
{ \
return _Tpvec(cast_ret(_mm_blendv_##suffix(cast(b.val), cast(a.val), cast(mask.val)))); \
}
OPENCV_HAL_IMPL_SSE_SELECT(v_uint8x16, OPENCV_HAL_NOP, OPENCV_HAL_NOP, epi8)
OPENCV_HAL_IMPL_SSE_SELECT(v_int8x16, OPENCV_HAL_NOP, OPENCV_HAL_NOP, epi8)
OPENCV_HAL_IMPL_SSE_SELECT(v_uint16x8, OPENCV_HAL_NOP, OPENCV_HAL_NOP, epi8)
OPENCV_HAL_IMPL_SSE_SELECT(v_int16x8, OPENCV_HAL_NOP, OPENCV_HAL_NOP, epi8)
OPENCV_HAL_IMPL_SSE_SELECT(v_uint32x4, _mm_castps_si128, _mm_castsi128_ps, ps)
OPENCV_HAL_IMPL_SSE_SELECT(v_int32x4, _mm_castps_si128, _mm_castsi128_ps, ps)
// OPENCV_HAL_IMPL_SSE_SELECT(v_uint64x2, TBD, TBD, pd)
// OPENCV_HAL_IMPL_SSE_SELECT(v_int64x2, TBD, TBD, ps)
OPENCV_HAL_IMPL_SSE_SELECT(v_float32x4, OPENCV_HAL_NOP, OPENCV_HAL_NOP, ps)
OPENCV_HAL_IMPL_SSE_SELECT(v_float64x2, OPENCV_HAL_NOP, OPENCV_HAL_NOP, pd)
#else // CV_SSE4_1
#define OPENCV_HAL_IMPL_SSE_SELECT(_Tpvec, suffix) \ #define OPENCV_HAL_IMPL_SSE_SELECT(_Tpvec, suffix) \
inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
{ \ { \
@ -1419,6 +1443,7 @@ OPENCV_HAL_IMPL_SSE_SELECT(v_int32x4, si128)
// OPENCV_HAL_IMPL_SSE_SELECT(v_int64x2, si128) // OPENCV_HAL_IMPL_SSE_SELECT(v_int64x2, si128)
OPENCV_HAL_IMPL_SSE_SELECT(v_float32x4, ps) OPENCV_HAL_IMPL_SSE_SELECT(v_float32x4, ps)
OPENCV_HAL_IMPL_SSE_SELECT(v_float64x2, pd) OPENCV_HAL_IMPL_SSE_SELECT(v_float64x2, pd)
#endif
#define OPENCV_HAL_IMPL_SSE_EXPAND(_Tpuvec, _Tpwuvec, _Tpu, _Tpsvec, _Tpwsvec, _Tps, suffix, wsuffix, shift) \ #define OPENCV_HAL_IMPL_SSE_EXPAND(_Tpuvec, _Tpwuvec, _Tpu, _Tpsvec, _Tpwsvec, _Tps, suffix, wsuffix, shift) \
inline void v_expand(const _Tpuvec& a, _Tpwuvec& b0, _Tpwuvec& b1) \ inline void v_expand(const _Tpuvec& a, _Tpwuvec& b0, _Tpwuvec& b1) \
@ -1607,6 +1632,28 @@ inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b)
inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c) inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c)
{ {
#if CV_SSSE3
static const __m128i m0 = _mm_setr_epi8(0, 3, 6, 9, 12, 15, 1, 4, 7, 10, 13, 2, 5, 8, 11, 14);
static const __m128i m1 = _mm_alignr_epi8(m0, m0, 11);
static const __m128i m2 = _mm_alignr_epi8(m0, m0, 6);
__m128i t0 = _mm_loadu_si128((const __m128i*)ptr);
__m128i t1 = _mm_loadu_si128((const __m128i*)(ptr + 16));
__m128i t2 = _mm_loadu_si128((const __m128i*)(ptr + 32));
__m128i s0 = _mm_shuffle_epi8(t0, m0);
__m128i s1 = _mm_shuffle_epi8(t1, m1);
__m128i s2 = _mm_shuffle_epi8(t2, m2);
t0 = _mm_alignr_epi8(s1, _mm_slli_si128(s0, 10), 5);
a.val = _mm_alignr_epi8(s2, t0, 5);
t1 = _mm_alignr_epi8(_mm_srli_si128(s1, 5), _mm_slli_si128(s0, 5), 6);
b.val = _mm_alignr_epi8(_mm_srli_si128(s2, 5), t1, 5);
t2 = _mm_alignr_epi8(_mm_srli_si128(s2, 10), s1, 11);
c.val = _mm_alignr_epi8(t2, s0, 11);
#else
__m128i t00 = _mm_loadu_si128((const __m128i*)ptr); __m128i t00 = _mm_loadu_si128((const __m128i*)ptr);
__m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 16)); __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 16));
__m128i t02 = _mm_loadu_si128((const __m128i*)(ptr + 32)); __m128i t02 = _mm_loadu_si128((const __m128i*)(ptr + 32));
@ -1626,6 +1673,7 @@ inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b,
a.val = _mm_unpacklo_epi8(t30, _mm_unpackhi_epi64(t31, t31)); a.val = _mm_unpacklo_epi8(t30, _mm_unpackhi_epi64(t31, t31));
b.val = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t30, t30), t32); b.val = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t30, t30), t32);
c.val = _mm_unpacklo_epi8(t31, _mm_unpackhi_epi64(t32, t32)); c.val = _mm_unpacklo_epi8(t31, _mm_unpackhi_epi64(t32, t32));
#endif
} }
inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c, v_uint8x16& d) inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c, v_uint8x16& d)
@ -1840,6 +1888,27 @@ inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x1
inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b, inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b,
const v_uint8x16& c ) const v_uint8x16& c )
{ {
#if CV_SSSE3
static const __m128i m0 = _mm_setr_epi8(0, 6, 11, 1, 7, 12, 2, 8, 13, 3, 9, 14, 4, 10, 15, 5);
static const __m128i m1 = _mm_setr_epi8(5, 11, 0, 6, 12, 1, 7, 13, 2, 8, 14, 3, 9, 15, 4, 10);
static const __m128i m2 = _mm_setr_epi8(10, 0, 5, 11, 1, 6, 12, 2, 7, 13, 3, 8, 14, 4, 9, 15);
__m128i t0 = _mm_alignr_epi8(b.val, _mm_slli_si128(a.val, 10), 5);
t0 = _mm_alignr_epi8(c.val, t0, 5);
__m128i s0 = _mm_shuffle_epi8(t0, m0);
__m128i t1 = _mm_alignr_epi8(_mm_srli_si128(b.val, 5), _mm_slli_si128(a.val, 5), 6);
t1 = _mm_alignr_epi8(_mm_srli_si128(c.val, 5), t1, 5);
__m128i s1 = _mm_shuffle_epi8(t1, m1);
__m128i t2 = _mm_alignr_epi8(_mm_srli_si128(c.val, 10), b.val, 11);
t2 = _mm_alignr_epi8(t2, a.val, 11);
__m128i s2 = _mm_shuffle_epi8(t2, m2);
_mm_storeu_si128((__m128i*)ptr, s0);
_mm_storeu_si128((__m128i*)(ptr + 16), s1);
_mm_storeu_si128((__m128i*)(ptr + 32), s2);
#else
__m128i z = _mm_setzero_si128(); __m128i z = _mm_setzero_si128();
__m128i ab0 = _mm_unpacklo_epi8(a.val, b.val); __m128i ab0 = _mm_unpacklo_epi8(a.val, b.val);
__m128i ab1 = _mm_unpackhi_epi8(a.val, b.val); __m128i ab1 = _mm_unpackhi_epi8(a.val, b.val);
@ -1881,6 +1950,7 @@ inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x1
_mm_storeu_si128((__m128i*)(ptr), v0); _mm_storeu_si128((__m128i*)(ptr), v0);
_mm_storeu_si128((__m128i*)(ptr + 16), v1); _mm_storeu_si128((__m128i*)(ptr + 16), v1);
_mm_storeu_si128((__m128i*)(ptr + 32), v2); _mm_storeu_si128((__m128i*)(ptr + 32), v2);
#endif
} }
inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b, inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b,

View File

@ -93,7 +93,7 @@ static void dumpOpenCLInformation()
const Device& device = Device::getDefault(); const Device& device = Device::getDefault();
if (!device.available()) if (!device.available())
CV_ErrorNoReturn(Error::OpenCLInitError, "OpenCL device is not available"); CV_Error(Error::OpenCLInitError, "OpenCL device is not available");
DUMP_MESSAGE_STDOUT("Current OpenCL device: "); DUMP_MESSAGE_STDOUT("Current OpenCL device: ");

View File

@ -76,7 +76,7 @@
#endif #endif
#ifndef CL_VERSION_1_2 #ifndef CL_VERSION_1_2
#define CV_REQUIRE_OPENCL_1_2_ERROR CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "OpenCV compiled without OpenCL v1.2 support, so we can't use functionality from OpenCL v1.2") #define CV_REQUIRE_OPENCL_1_2_ERROR CV_Error(cv::Error::OpenCLApiCallError, "OpenCV compiled without OpenCL v1.2 support, so we can't use functionality from OpenCL v1.2")
#endif #endif
#endif // HAVE_OPENCL #endif // HAVE_OPENCL

View File

@ -96,8 +96,8 @@ PERF_TEST_P(Size_MatType, Mat_Clone_Roi,
} }
PERF_TEST_P(Size_MatType, Mat_CopyToWithMask, PERF_TEST_P(Size_MatType, Mat_CopyToWithMask,
testing::Combine(testing::Values(TYPICAL_MAT_SIZES), testing::Combine(testing::Values(::perf::sz1080p, ::perf::szODD),
testing::Values(CV_8UC1, CV_8UC2)) testing::Values(CV_8UC1, CV_8UC2, CV_8UC3, CV_16UC1, CV_32SC1, CV_32FC4))
) )
{ {
const Size_MatType_t params = GetParam(); const Size_MatType_t params = GetParam();

View File

@ -53,7 +53,6 @@ namespace cv {
static void* OutOfMemoryError(size_t size) static void* OutOfMemoryError(size_t size)
{ {
CV_Error_(CV_StsNoMem, ("Failed to allocate %llu bytes", (unsigned long long)size)); CV_Error_(CV_StsNoMem, ("Failed to allocate %llu bytes", (unsigned long long)size));
return 0;
} }

View File

@ -1,3 +1,6 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "precomp.hpp" #include "precomp.hpp"
#include <sstream> #include <sstream>
@ -364,7 +367,6 @@ bool CommandLineParser::has(const String& name) const
} }
CV_Error_(Error::StsBadArg, ("undeclared key '%s' requested", name.c_str())); CV_Error_(Error::StsBadArg, ("undeclared key '%s' requested", name.c_str()));
return false;
} }
bool CommandLineParser::check() const bool CommandLineParser::check() const

View File

@ -91,11 +91,7 @@ copyMask_<uchar>(const uchar* _src, size_t sstep, const uchar* mask, size_t mste
uchar* dst = (uchar*)_dst; uchar* dst = (uchar*)_dst;
int x = 0; int x = 0;
#if CV_SIMD128 #if CV_SIMD128
if( hasSIMD128() {
#if CV_SSE4_2
&& USE_SSE4_2
#endif
) {
v_uint8x16 v_zero = v_setzero_u8(); v_uint8x16 v_zero = v_setzero_u8();
for( ; x <= size.width - 16; x += 16 ) for( ; x <= size.width - 16; x += 16 )
@ -104,11 +100,7 @@ copyMask_<uchar>(const uchar* _src, size_t sstep, const uchar* mask, size_t mste
v_dst = v_load(dst + x), v_dst = v_load(dst + x),
v_nmask = v_load(mask + x) == v_zero; v_nmask = v_load(mask + x) == v_zero;
#if CV_SSE4_2
v_dst = v_uint8x16(_mm_blendv_epi8(v_src.val, v_dst.val, v_nmask.val));
#else
v_dst = v_select(v_nmask, v_dst, v_src); v_dst = v_select(v_nmask, v_dst, v_src);
#endif
v_store(dst + x, v_dst); v_store(dst + x, v_dst);
} }
} }
@ -130,11 +122,7 @@ copyMask_<ushort>(const uchar* _src, size_t sstep, const uchar* mask, size_t mst
ushort* dst = (ushort*)_dst; ushort* dst = (ushort*)_dst;
int x = 0; int x = 0;
#if CV_SIMD128 #if CV_SIMD128
if( hasSIMD128() {
#if CV_SSE4_2
&& USE_SSE4_2
#endif
) {
v_uint8x16 v_zero = v_setzero_u8(); v_uint8x16 v_zero = v_setzero_u8();
for( ; x <= size.width - 16; x += 16 ) for( ; x <= size.width - 16; x += 16 )
@ -146,13 +134,8 @@ copyMask_<ushort>(const uchar* _src, size_t sstep, const uchar* mask, size_t mst
v_uint8x16 v_nmask = v_load(mask + x) == v_zero; v_uint8x16 v_nmask = v_load(mask + x) == v_zero;
v_zip(v_nmask, v_nmask, v_nmask1, v_nmask2); v_zip(v_nmask, v_nmask, v_nmask1, v_nmask2);
#if CV_SSE4_2
v_dst1 = v_uint16x8(_mm_blendv_epi8(v_src1.val, v_dst1.val, v_nmask1.val));
v_dst2 = v_uint16x8(_mm_blendv_epi8(v_src2.val, v_dst2.val, v_nmask2.val));
#else
v_dst1 = v_select(v_reinterpret_as_u16(v_nmask1), v_dst1, v_src1); v_dst1 = v_select(v_reinterpret_as_u16(v_nmask1), v_dst1, v_src1);
v_dst2 = v_select(v_reinterpret_as_u16(v_nmask2), v_dst2, v_src2); v_dst2 = v_select(v_reinterpret_as_u16(v_nmask2), v_dst2, v_src2);
#endif
v_store(dst + x, v_dst1); v_store(dst + x, v_dst1);
v_store(dst + x + 8, v_dst2); v_store(dst + x + 8, v_dst2);
} }

View File

@ -50,11 +50,11 @@
#include <vector> #include <vector>
# include "directx.inc.hpp" # include "directx.inc.hpp"
#else // HAVE_DIRECTX #else // HAVE_DIRECTX
#define NO_DIRECTX_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without DirectX support") #define NO_DIRECTX_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without DirectX support")
#endif #endif
#ifndef HAVE_OPENCL #ifndef HAVE_OPENCL
# define NO_OPENCL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without OpenCL support") # define NO_OPENCL_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without OpenCL support")
#endif // HAVE_OPENCL #endif // HAVE_OPENCL
namespace cv { namespace directx { namespace cv { namespace directx {

View File

@ -143,6 +143,9 @@
return func; return func;
} }
#else #else
#if defined(_MSC_VER)
#pragma warning(disable : 4702) // unreachable code
#endif
static void* IntGetProcAddress(const char*) static void* IntGetProcAddress(const char*)
{ {
CV_Error(cv::Error::OpenGlNotSupported, "The library is compiled without OpenGL support"); CV_Error(cv::Error::OpenGlNotSupported, "The library is compiled without OpenGL support");

View File

@ -912,7 +912,6 @@ Mat Mat::reshape(int _cn, int _newndims, const int* _newsz) const
CV_Error(CV_StsNotImplemented, "Reshaping of n-dimensional non-continuous matrices is not supported yet"); CV_Error(CV_StsNotImplemented, "Reshaping of n-dimensional non-continuous matrices is not supported yet");
// TBD // TBD
return Mat();
} }
Mat Mat::reshape(int _cn, const std::vector<int>& _newshape) const Mat Mat::reshape(int _cn, const std::vector<int>& _newshape) const

View File

@ -179,7 +179,6 @@ Mat cvarrToMat(const CvArr* arr, bool copyData,
return buf; return buf;
} }
CV_Error(CV_StsBadArg, "Unknown array type"); CV_Error(CV_StsBadArg, "Unknown array type");
return Mat();
} }
void extractImageCOI(const CvArr* arr, OutputArray _ch, int coi) void extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)

View File

@ -110,14 +110,12 @@ Mat _InputArray::getMat_(int i) const
{ {
CV_Assert( i < 0 ); CV_Assert( i < 0 );
CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapHost/unmapHost methods for ogl::Buffer object"); CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapHost/unmapHost methods for ogl::Buffer object");
return Mat();
} }
if( k == CUDA_GPU_MAT ) if( k == CUDA_GPU_MAT )
{ {
CV_Assert( i < 0 ); CV_Assert( i < 0 );
CV_Error(cv::Error::StsNotImplemented, "You should explicitly call download method for cuda::GpuMat object"); CV_Error(cv::Error::StsNotImplemented, "You should explicitly call download method for cuda::GpuMat object");
return Mat();
} }
if( k == CUDA_HOST_MEM ) if( k == CUDA_HOST_MEM )
@ -130,7 +128,6 @@ Mat _InputArray::getMat_(int i) const
} }
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type"); CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
return Mat();
} }
UMat _InputArray::getUMat(int i) const UMat _InputArray::getUMat(int i) const
@ -354,14 +351,12 @@ cuda::GpuMat _InputArray::getGpuMat() const
if (k == OPENGL_BUFFER) if (k == OPENGL_BUFFER)
{ {
CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapDevice/unmapDevice methods for ogl::Buffer object"); CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapDevice/unmapDevice methods for ogl::Buffer object");
return cuda::GpuMat();
} }
if (k == NONE) if (k == NONE)
return cuda::GpuMat(); return cuda::GpuMat();
CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::HostMem"); CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::HostMem");
return cuda::GpuMat();
} }
void _InputArray::getGpuMatVector(std::vector<cuda::GpuMat>& gpumv) const void _InputArray::getGpuMatVector(std::vector<cuda::GpuMat>& gpumv) const
{ {
@ -516,7 +511,6 @@ Size _InputArray::size(int i) const
} }
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type"); CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
return Size();
} }
int _InputArray::sizend(int* arrsz, int i) const int _InputArray::sizend(int* arrsz, int i) const
@ -716,7 +710,6 @@ int _InputArray::dims(int i) const
} }
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type"); CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
return 0;
} }
size_t _InputArray::total(int i) const size_t _InputArray::total(int i) const
@ -845,7 +838,6 @@ int _InputArray::type(int i) const
return ((const cuda::HostMem*)obj)->type(); return ((const cuda::HostMem*)obj)->type();
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type"); CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
return 0;
} }
int _InputArray::depth(int i) const int _InputArray::depth(int i) const
@ -928,7 +920,6 @@ bool _InputArray::empty() const
return ((const cuda::HostMem*)obj)->empty(); return ((const cuda::HostMem*)obj)->empty();
CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type"); CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
return true;
} }
bool _InputArray::isContinuous(int i) const bool _InputArray::isContinuous(int i) const
@ -970,7 +961,6 @@ bool _InputArray::isContinuous(int i) const
return i < 0 ? ((const cuda::GpuMat*)obj)->isContinuous() : true; return i < 0 ? ((const cuda::GpuMat*)obj)->isContinuous() : true;
CV_Error(CV_StsNotImplemented, "Unknown/unsupported array type"); CV_Error(CV_StsNotImplemented, "Unknown/unsupported array type");
return false;
} }
bool _InputArray::isSubmatrix(int i) const bool _InputArray::isSubmatrix(int i) const
@ -1009,7 +999,6 @@ bool _InputArray::isSubmatrix(int i) const
} }
CV_Error(CV_StsNotImplemented, ""); CV_Error(CV_StsNotImplemented, "");
return false;
} }
size_t _InputArray::offset(int i) const size_t _InputArray::offset(int i) const
@ -1074,7 +1063,6 @@ size_t _InputArray::offset(int i) const
} }
CV_Error(Error::StsNotImplemented, ""); CV_Error(Error::StsNotImplemented, "");
return 0;
} }
size_t _InputArray::step(int i) const size_t _InputArray::step(int i) const
@ -1135,7 +1123,6 @@ size_t _InputArray::step(int i) const
} }
CV_Error(Error::StsNotImplemented, ""); CV_Error(Error::StsNotImplemented, "");
return 0;
} }
void _InputArray::copyTo(const _OutputArray& arr) const void _InputArray::copyTo(const _OutputArray& arr) const
@ -1459,7 +1446,6 @@ void _OutputArray::create(int d, const int* sizes, int mtype, int i,
if( k == NONE ) if( k == NONE )
{ {
CV_Error(CV_StsNullPtr, "create() called for the missing output array" ); CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
return;
} }
if( k == STD_VECTOR_MAT ) if( k == STD_VECTOR_MAT )

View File

@ -133,7 +133,7 @@ namespace cv { namespace ocl {
int refcount int refcount
#ifndef HAVE_OPENCL #ifndef HAVE_OPENCL
#define CV_OPENCL_NO_SUPPORT() CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "OpenCV build without OpenCL support") #define CV_OPENCL_NO_SUPPORT() CV_Error(cv::Error::OpenCLApiCallError, "OpenCV build without OpenCL support")
namespace { namespace {
struct DummyImpl struct DummyImpl
{ {
@ -2177,7 +2177,7 @@ struct Context::Impl
if (!ptr) if (!ptr)
{ {
CV_OPENCL_SVM_TRACE_ERROR_P("clSVMAlloc returned NULL...\n"); CV_OPENCL_SVM_TRACE_ERROR_P("clSVMAlloc returned NULL...\n");
CV_ErrorNoReturn(Error::StsBadArg, "clSVMAlloc returned NULL"); CV_Error(Error::StsBadArg, "clSVMAlloc returned NULL");
} }
try try
{ {
@ -2186,7 +2186,7 @@ struct Context::Impl
if (CL_SUCCESS != clEnqueueSVMMap(q, CL_TRUE, CL_MAP_WRITE, ptr, 100, 0, NULL, NULL)) if (CL_SUCCESS != clEnqueueSVMMap(q, CL_TRUE, CL_MAP_WRITE, ptr, 100, 0, NULL, NULL))
{ {
CV_OPENCL_SVM_TRACE_ERROR_P("clEnqueueSVMMap failed...\n"); CV_OPENCL_SVM_TRACE_ERROR_P("clEnqueueSVMMap failed...\n");
CV_ErrorNoReturn(Error::StsBadArg, "clEnqueueSVMMap FAILED"); CV_Error(Error::StsBadArg, "clEnqueueSVMMap FAILED");
} }
clFinish(q); clFinish(q);
try try
@ -2201,12 +2201,12 @@ struct Context::Impl
if (CL_SUCCESS != clEnqueueSVMUnmap(q, ptr, 0, NULL, NULL)) if (CL_SUCCESS != clEnqueueSVMUnmap(q, ptr, 0, NULL, NULL))
{ {
CV_OPENCL_SVM_TRACE_ERROR_P("clEnqueueSVMUnmap failed...\n"); CV_OPENCL_SVM_TRACE_ERROR_P("clEnqueueSVMUnmap failed...\n");
CV_ErrorNoReturn(Error::StsBadArg, "clEnqueueSVMUnmap FAILED"); CV_Error(Error::StsBadArg, "clEnqueueSVMUnmap FAILED");
} }
clFinish(q); clFinish(q);
if (error) if (error)
{ {
CV_ErrorNoReturn(Error::StsBadArg, "OpenCL SVM buffer access test was FAILED"); CV_Error(Error::StsBadArg, "OpenCL SVM buffer access test was FAILED");
} }
} }
catch (...) catch (...)
@ -2412,7 +2412,7 @@ void Context::setUseSVM(bool enabled)
i->svmInit(); i->svmInit();
if (enabled && !i->svmAvailable) if (enabled && !i->svmAvailable)
{ {
CV_ErrorNoReturn(Error::StsError, "OpenCL Shared Virtual Memory (SVM) is not supported by OpenCL device"); CV_Error(Error::StsError, "OpenCL Shared Virtual Memory (SVM) is not supported by OpenCL device");
} }
i->svmEnabled = enabled; i->svmEnabled = enabled;
} }
@ -2483,7 +2483,7 @@ void attachContext(const String& platformName, void* platformID, void* context,
CV_OCL_CHECK(clGetPlatformIDs(0, 0, &cnt)); CV_OCL_CHECK(clGetPlatformIDs(0, 0, &cnt));
if (cnt == 0) if (cnt == 0)
CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "no OpenCL platform available!"); CV_Error(cv::Error::OpenCLApiCallError, "no OpenCL platform available!");
std::vector<cl_platform_id> platforms(cnt); std::vector<cl_platform_id> platforms(cnt);
@ -2505,13 +2505,13 @@ void attachContext(const String& platformName, void* platformID, void* context,
} }
if (!platformAvailable) if (!platformAvailable)
CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "No matched platforms available!"); CV_Error(cv::Error::OpenCLApiCallError, "No matched platforms available!");
// check if platformID corresponds to platformName // check if platformID corresponds to platformName
String actualPlatformName; String actualPlatformName;
get_platform_name((cl_platform_id)platformID, actualPlatformName); get_platform_name((cl_platform_id)platformID, actualPlatformName);
if (platformName != actualPlatformName) if (platformName != actualPlatformName)
CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "No matched platforms available!"); CV_Error(cv::Error::OpenCLApiCallError, "No matched platforms available!");
// do not initialize OpenCL context // do not initialize OpenCL context
Context ctx = Context::getDefault(false); Context ctx = Context::getDefault(false);
@ -3305,7 +3305,7 @@ struct ProgramSource::Impl
hash = crc64(sourceAddr_, sourceSize_); hash = crc64(sourceAddr_, sourceSize_);
break; break;
default: default:
CV_ErrorNoReturn(Error::StsInternal, "Internal error"); CV_Error(Error::StsInternal, "Internal error");
} }
sourceHash_ = cv::format("%08llx", hash); sourceHash_ = cv::format("%08llx", hash);
isHashUpdated = true; isHashUpdated = true;
@ -3427,7 +3427,7 @@ const String& ProgramSource::source() const
ProgramSource::hash_t ProgramSource::hash() const ProgramSource::hash_t ProgramSource::hash() const
{ {
CV_ErrorNoReturn(Error::StsNotImplemented, "Removed method: ProgramSource::hash()"); CV_Error(Error::StsNotImplemented, "Removed method: ProgramSource::hash()");
} }
ProgramSource ProgramSource::fromBinary(const String& module, const String& name, ProgramSource ProgramSource::fromBinary(const String& module, const String& name,
@ -3597,11 +3597,11 @@ struct Program::Impl
} }
else if (src_->kind_ == ProgramSource::Impl::PROGRAM_SPIRV) else if (src_->kind_ == ProgramSource::Impl::PROGRAM_SPIRV)
{ {
CV_ErrorNoReturn(Error::StsNotImplemented, "OpenCL: SPIR-V is not supported"); CV_Error(Error::StsNotImplemented, "OpenCL: SPIR-V is not supported");
} }
else else
{ {
CV_ErrorNoReturn(Error::StsInternal, "Internal error"); CV_Error(Error::StsInternal, "Internal error");
} }
CV_Assert(handle != NULL); CV_Assert(handle != NULL);
#if OPENCV_HAVE_FILESYSTEM_SUPPORT #if OPENCV_HAVE_FILESYSTEM_SUPPORT
@ -3948,19 +3948,19 @@ void* Program::ptr() const
#ifndef OPENCV_REMOVE_DEPRECATED_API #ifndef OPENCV_REMOVE_DEPRECATED_API
const ProgramSource& Program::source() const const ProgramSource& Program::source() const
{ {
CV_ErrorNoReturn(Error::StsNotImplemented, "Removed API"); CV_Error(Error::StsNotImplemented, "Removed API");
} }
bool Program::read(const String& bin, const String& buildflags) bool Program::read(const String& bin, const String& buildflags)
{ {
CV_UNUSED(bin); CV_UNUSED(buildflags); CV_UNUSED(bin); CV_UNUSED(buildflags);
CV_ErrorNoReturn(Error::StsNotImplemented, "Removed API"); CV_Error(Error::StsNotImplemented, "Removed API");
} }
bool Program::write(String& bin) const bool Program::write(String& bin) const
{ {
CV_UNUSED(bin); CV_UNUSED(bin);
CV_ErrorNoReturn(Error::StsNotImplemented, "Removed API"); CV_Error(Error::StsNotImplemented, "Removed API");
} }
String Program::getPrefix() const String Program::getPrefix() const
@ -5627,7 +5627,7 @@ public:
} }
if (id != NULL && strcmp(id, "OCL") != 0) if (id != NULL && strcmp(id, "OCL") != 0)
{ {
CV_ErrorNoReturn(cv::Error::StsBadArg, "getBufferPoolController(): unknown BufferPool ID\n"); CV_Error(cv::Error::StsBadArg, "getBufferPoolController(): unknown BufferPool ID\n");
} }
return &bufferPool; return &bufferPool;
} }

View File

@ -316,7 +316,7 @@ static void* opencl_check_fn(int ID)
#endif #endif
else else
{ {
CV_ErrorNoReturn(cv::Error::StsBadArg, "Invalid function ID"); CV_Error(cv::Error::StsBadArg, "Invalid function ID");
} }
void* func = CV_CL_GET_PROC_ADDRESS(e->fnName); void* func = CV_CL_GET_PROC_ADDRESS(e->fnName);
if (!func) if (!func)

View File

@ -48,7 +48,7 @@
# include <cuda_gl_interop.h> # include <cuda_gl_interop.h>
# endif # endif
#else // HAVE_OPENGL #else // HAVE_OPENGL
# define NO_OPENGL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without OpenGL support") # define NO_OPENGL_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without OpenGL support")
#endif // HAVE_OPENGL #endif // HAVE_OPENGL
using namespace cv; using namespace cv;
@ -1304,10 +1304,15 @@ void cv::ogl::Arrays::release()
void cv::ogl::Arrays::setAutoRelease(bool flag) void cv::ogl::Arrays::setAutoRelease(bool flag)
{ {
#ifndef HAVE_OPENGL
CV_UNUSED(flag);
throw_no_ogl();
#else
vertex_.setAutoRelease(flag); vertex_.setAutoRelease(flag);
color_.setAutoRelease(flag); color_.setAutoRelease(flag);
normal_.setAutoRelease(flag); normal_.setAutoRelease(flag);
texCoord_.setAutoRelease(flag); texCoord_.setAutoRelease(flag);
#endif
} }
void cv::ogl::Arrays::bind() const void cv::ogl::Arrays::bind() const
@ -1563,10 +1568,10 @@ void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scala
# ifdef cl_khr_gl_sharing # ifdef cl_khr_gl_sharing
# define HAVE_OPENCL_OPENGL_SHARING # define HAVE_OPENCL_OPENGL_SHARING
# else # else
# define NO_OPENCL_SHARING_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without OpenCL/OpenGL sharing support") # define NO_OPENCL_SHARING_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without OpenCL/OpenGL sharing support")
# endif # endif
#else // HAVE_OPENCL #else // HAVE_OPENCL
# define NO_OPENCL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without OpenCL support") # define NO_OPENCL_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without OpenCL support")
#endif // HAVE_OPENCL #endif // HAVE_OPENCL
#if defined(HAVE_OPENGL) #if defined(HAVE_OPENGL)

View File

@ -233,7 +233,7 @@ namespace
#if CV__EXCEPTION_PTR #if CV__EXCEPTION_PTR
std::rethrow_exception(pException); std::rethrow_exception(pException);
#else #else
CV_ErrorNoReturn(Error::StsError, "Exception in parallel_for() body: " + exception_message); CV_Error(Error::StsError, "Exception in parallel_for() body: " + exception_message);
#endif #endif
} }
} }

View File

@ -85,7 +85,7 @@ char* icvGets( CvFileStorage* fs, char* str, int maxCount )
return ptr; return ptr;
} }
#endif #endif
CV_ErrorNoReturn(CV_StsError, "The storage is not opened"); CV_Error(CV_StsError, "The storage is not opened");
} }
int icvEof( CvFileStorage* fs ) int icvEof( CvFileStorage* fs )

View File

@ -532,7 +532,7 @@ struct HWFeatures
"******************************************************************\n"); "******************************************************************\n");
fprintf(stderr, "\nRequired baseline features:\n"); fprintf(stderr, "\nRequired baseline features:\n");
checkFeatures(baseline_features, sizeof(baseline_features) / sizeof(baseline_features[0]), true); checkFeatures(baseline_features, sizeof(baseline_features) / sizeof(baseline_features[0]), true);
CV_ErrorNoReturn(cv::Error::StsAssert, "Missing support for required CPU baseline features. Check OpenCV build configuration and required CPU/HW setup."); CV_Error(cv::Error::StsAssert, "Missing support for required CPU baseline features. Check OpenCV build configuration and required CPU/HW setup.");
} }
readSettings(baseline_features, sizeof(baseline_features) / sizeof(baseline_features[0])); readSettings(baseline_features, sizeof(baseline_features) / sizeof(baseline_features[0]));
@ -1567,7 +1567,7 @@ bool utils::getConfigurationParameterBool(const char* name, bool defaultValue)
{ {
return false; return false;
} }
CV_ErrorNoReturn(cv::Error::StsBadArg, cv::format("Invalid value for %s parameter: %s", name, value.c_str())); CV_Error(cv::Error::StsBadArg, cv::format("Invalid value for %s parameter: %s", name, value.c_str()));
} }
@ -1598,7 +1598,7 @@ size_t utils::getConfigurationParameterSizeT(const char* name, size_t defaultVal
return v * 1024 * 1024; return v * 1024 * 1024;
else if (suffixStr == "KB" || suffixStr == "Kb" || suffixStr == "kb") else if (suffixStr == "KB" || suffixStr == "Kb" || suffixStr == "kb")
return v * 1024; return v * 1024;
CV_ErrorNoReturn(cv::Error::StsBadArg, cv::format("Invalid value for %s parameter: %s", name, value.c_str())); CV_Error(cv::Error::StsBadArg, cv::format("Invalid value for %s parameter: %s", name, value.c_str()));
} }
cv::String utils::getConfigurationParameterString(const char* name, const char* defaultValue) cv::String utils::getConfigurationParameterString(const char* name, const char* defaultValue)

View File

@ -837,8 +837,6 @@ UMat UMat::reshape(int _cn, int _newndims, const int* _newsz) const
} }
CV_Error(CV_StsNotImplemented, "Reshaping of n-dimensional non-continuous matrices is not supported yet"); CV_Error(CV_StsNotImplemented, "Reshaping of n-dimensional non-continuous matrices is not supported yet");
// TBD
return UMat();
} }
Mat UMat::getMat(int accessFlags) const Mat UMat::getMat(int accessFlags) const

View File

@ -265,7 +265,7 @@ struct FileLock::Impl
} }
else else
{ {
CV_ErrorNoReturn_(Error::StsAssert, ("Can't open lock file: %s", fname)); CV_Error_(Error::StsAssert, ("Can't open lock file: %s", fname));
} }
} }
break; break;
@ -517,7 +517,7 @@ cv::String getCacheDirectory(const char* sub_directory_name, const char* configu
} }
#else #else
#define NOT_IMPLEMENTED CV_ErrorNoReturn(Error::StsNotImplemented, ""); #define NOT_IMPLEMENTED CV_Error(Error::StsNotImplemented, "");
CV_EXPORTS bool exists(const cv::String& /*path*/) { NOT_IMPLEMENTED } CV_EXPORTS bool exists(const cv::String& /*path*/) { NOT_IMPLEMENTED }
CV_EXPORTS void remove_all(const cv::String& /*path*/) { NOT_IMPLEMENTED } CV_EXPORTS void remove_all(const cv::String& /*path*/) { NOT_IMPLEMENTED }
CV_EXPORTS bool createDirectory(const cv::String& /*path*/) { NOT_IMPLEMENTED } CV_EXPORTS bool createDirectory(const cv::String& /*path*/) { NOT_IMPLEMENTED }

View File

@ -10,7 +10,7 @@
#ifdef HAVE_VA #ifdef HAVE_VA
# include <va/va.h> # include <va/va.h>
#else // HAVE_VA #else // HAVE_VA
# define NO_VA_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without VA support (libva)") # define NO_VA_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without VA support (libva)")
#endif // HAVE_VA #endif // HAVE_VA
using namespace cv; using namespace cv;

View File

@ -174,7 +174,6 @@ bool Core_EigenTest::check_pair_count(const cv::Mat& src, const cv::Mat& evalues
std::cout << "Number of rows: " << evalues.rows << " Number of cols: " << evalues.cols << endl; std::cout << "Number of rows: " << evalues.rows << " Number of cols: " << evalues.cols << endl;
std::cout << "Size of src symmetric matrix: " << src.rows << " * " << src.cols << endl; std::cout << endl; std::cout << "Size of src symmetric matrix: " << src.rows << " * " << src.cols << endl; std::cout << endl;
CV_Error(CORE_EIGEN_ERROR_COUNT, MESSAGE_ERROR_COUNT); CV_Error(CORE_EIGEN_ERROR_COUNT, MESSAGE_ERROR_COUNT);
return false;
} }
return true; return true;
} }
@ -190,7 +189,6 @@ bool Core_EigenTest::check_pair_count(const cv::Mat& src, const cv::Mat& evalues
std::cout << "Number of rows: " << evectors.rows << " Number of cols: " << evectors.cols << endl; std::cout << "Number of rows: " << evectors.rows << " Number of cols: " << evectors.cols << endl;
std:: cout << "Size of src symmetric matrix: " << src.rows << " * " << src.cols << endl; std::cout << endl; std:: cout << "Size of src symmetric matrix: " << src.rows << " * " << src.cols << endl; std::cout << endl;
CV_Error (CORE_EIGEN_ERROR_SIZE, MESSAGE_ERROR_SIZE); CV_Error (CORE_EIGEN_ERROR_SIZE, MESSAGE_ERROR_SIZE);
return false;
} }
if (!(evalues.rows == right_eigen_pair_count && evalues.cols == 1)) if (!(evalues.rows == right_eigen_pair_count && evalues.cols == 1))
@ -199,7 +197,6 @@ bool Core_EigenTest::check_pair_count(const cv::Mat& src, const cv::Mat& evalues
std::cout << "Number of rows: " << evalues.rows << " Number of cols: " << evalues.cols << endl; std::cout << "Number of rows: " << evalues.rows << " Number of cols: " << evalues.cols << endl;
std:: cout << "Size of src symmetric matrix: " << src.rows << " * " << src.cols << endl; std::cout << endl; std:: cout << "Size of src symmetric matrix: " << src.rows << " * " << src.cols << endl; std::cout << endl;
CV_Error (CORE_EIGEN_ERROR_COUNT, MESSAGE_ERROR_COUNT); CV_Error (CORE_EIGEN_ERROR_COUNT, MESSAGE_ERROR_COUNT);
return false;
} }
return true; return true;
@ -237,7 +234,6 @@ bool Core_EigenTest::check_orthogonality(const cv::Mat& U)
std::cout << endl; std::cout << "Checking orthogonality of matrix " << U << ": "; std::cout << endl; std::cout << "Checking orthogonality of matrix " << U << ": ";
print_information(i, U, diff, eps_vec); print_information(i, U, diff, eps_vec);
CV_Error(CORE_EIGEN_ERROR_ORTHO, MESSAGE_ERROR_ORTHO); CV_Error(CORE_EIGEN_ERROR_ORTHO, MESSAGE_ERROR_ORTHO);
return false;
} }
} }
@ -257,7 +253,6 @@ bool Core_EigenTest::check_pairs_order(const cv::Mat& eigen_values)
std::cout << "Pair of indexes with non descending of eigen values: (" << i << ", " << i+1 << ")." << endl; std::cout << "Pair of indexes with non descending of eigen values: (" << i << ", " << i+1 << ")." << endl;
std::cout << endl; std::cout << endl;
CV_Error(CORE_EIGEN_ERROR_ORDER, MESSAGE_ERROR_ORDER); CV_Error(CORE_EIGEN_ERROR_ORDER, MESSAGE_ERROR_ORDER);
return false;
} }
break; break;
@ -272,7 +267,6 @@ bool Core_EigenTest::check_pairs_order(const cv::Mat& eigen_values)
std::cout << "Pair of indexes with non descending of eigen values: (" << i << ", " << i+1 << ")." << endl; std::cout << "Pair of indexes with non descending of eigen values: (" << i << ", " << i+1 << ")." << endl;
std::cout << endl; std::cout << endl;
CV_Error(CORE_EIGEN_ERROR_ORDER, "Eigen values are not sorted in descending order."); CV_Error(CORE_EIGEN_ERROR_ORDER, "Eigen values are not sorted in descending order.");
return false;
} }
break; break;
@ -331,7 +325,6 @@ bool Core_EigenTest::test_pairs(const cv::Mat& src)
std::cout << endl; std::cout << "Checking accuracy of eigen vectors computing for matrix " << src << ": "; std::cout << endl; std::cout << "Checking accuracy of eigen vectors computing for matrix " << src << ": ";
print_information(i, src, diff, eps_vec); print_information(i, src, diff, eps_vec);
CV_Error(CORE_EIGEN_ERROR_DIFF, MESSAGE_ERROR_DIFF_2); CV_Error(CORE_EIGEN_ERROR_DIFF, MESSAGE_ERROR_DIFF_2);
return false;
} }
} }
@ -360,7 +353,6 @@ bool Core_EigenTest::test_values(const cv::Mat& src)
std::cout << endl; std::cout << "Checking accuracy of eigen values computing for matrix " << src << ": "; std::cout << endl; std::cout << "Checking accuracy of eigen values computing for matrix " << src << ": ";
print_information(i, src, diff, eps_val); print_information(i, src, diff, eps_val);
CV_Error(CORE_EIGEN_ERROR_DIFF, MESSAGE_ERROR_DIFF_1); CV_Error(CORE_EIGEN_ERROR_DIFF, MESSAGE_ERROR_DIFF_1);
return false;
} }
} }

View File

@ -657,8 +657,15 @@ template<typename R> struct TheTest
TheTest & test_mask() TheTest & test_mask()
{ {
Data<R> dataA, dataB, dataC, dataD(1), dataE(2); typedef V_TypeTraits<LaneType> Traits;
typedef typename Traits::int_type int_type;
Data<R> dataA, dataB(0), dataC, dataD(1), dataE(2);
dataA[1] *= (LaneType)-1; dataA[1] *= (LaneType)-1;
const LaneType mask_one = Traits::reinterpret_from_int(~(typename Traits::uint_type)(0));
dataB[1] = mask_one;
dataB[R::nlanes / 2] = mask_one;
dataB[R::nlanes - 1] = mask_one;
dataC *= (LaneType)-1; dataC *= (LaneType)-1;
R a = dataA, b = dataB, c = dataC, d = dataD, e = dataE; R a = dataA, b = dataB, c = dataC, d = dataD, e = dataE;
@ -670,12 +677,9 @@ template<typename R> struct TheTest
EXPECT_EQ(true, v_check_all(c)); EXPECT_EQ(true, v_check_all(c));
EXPECT_EQ(true, v_check_any(a)); EXPECT_EQ(true, v_check_any(a));
EXPECT_EQ(false, v_check_any(b)); EXPECT_EQ(true, v_check_any(b));
EXPECT_EQ(true, v_check_any(c)); EXPECT_EQ(true, v_check_any(c));
typedef V_TypeTraits<LaneType> Traits;
typedef typename Traits::int_type int_type;
R f = v_select(b, d, e); R f = v_select(b, d, e);
Data<R> resF = f; Data<R> resF = f;
for (int i = 0; i < R::nlanes; ++i) for (int i = 0; i < R::nlanes; ++i)

View File

@ -555,7 +555,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* An every sample in the batch is normalized separately. Optionally, * An every sample in the batch is normalized separately. Optionally,
* output is scaled by the trained parameters. * output is scaled by the trained parameters.
*/ */
class NormalizeBBoxLayer : public Layer class CV_EXPORTS NormalizeBBoxLayer : public Layer
{ {
public: public:
float pnorm, epsilon; float pnorm, epsilon;

View File

@ -142,6 +142,10 @@ public:
const T &set(const String &key, const T &value); const T &set(const String &key, const T &value);
friend std::ostream &operator<<(std::ostream &stream, const Dict &dict); friend std::ostream &operator<<(std::ostream &stream, const Dict &dict);
std::map<String, DictValue>::const_iterator begin() const;
std::map<String, DictValue>::const_iterator end() const;
}; };
//! @} //! @}

View File

@ -102,9 +102,13 @@ inline int64 DictValue::get<int64>(int idx) const
return (int64)doubleValue; return (int64)doubleValue;
} }
else if (type == Param::STRING)
{
return std::atoi((*ps)[idx].c_str());
}
else else
{ {
CV_Assert(isInt() || isReal()); CV_Assert(isInt() || isReal() || isString());
return 0; return 0;
} }
} }
@ -146,9 +150,13 @@ inline double DictValue::get<double>(int idx) const
{ {
return (double)(*pi)[idx]; return (double)(*pi)[idx];
} }
else if (type == Param::STRING)
{
return std::atof((*ps)[idx].c_str());
}
else else
{ {
CV_Assert(isReal() || isInt()); CV_Assert(isReal() || isInt() || isString());
return 0; return 0;
} }
} }
@ -261,17 +269,16 @@ inline int DictValue::size() const
{ {
case Param::INT: case Param::INT:
return (int)pi->size(); return (int)pi->size();
break;
case Param::STRING: case Param::STRING:
return (int)ps->size(); return (int)ps->size();
break;
case Param::REAL: case Param::REAL:
return (int)pd->size(); return (int)pd->size();
break;
default:
CV_Error(Error::StsInternal, "");
return -1;
} }
#ifdef __OPENCV_BUILD
CV_Error(Error::StsInternal, "");
#else
CV_ErrorNoReturn(Error::StsInternal, "");
#endif
} }
inline std::ostream &operator<<(std::ostream &stream, const DictValue &dictv) inline std::ostream &operator<<(std::ostream &stream, const DictValue &dictv)
@ -366,6 +373,16 @@ inline std::ostream &operator<<(std::ostream &stream, const Dict &dict)
return stream; return stream;
} }
inline std::map<String, DictValue>::const_iterator Dict::begin() const
{
return dict.begin();
}
inline std::map<String, DictValue>::const_iterator Dict::end() const
{
return dict.end();
}
CV__DNN_EXPERIMENTAL_NS_END CV__DNN_EXPERIMENTAL_NS_END
} }
} }

View File

@ -13,11 +13,11 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief Registers layer constructor in runtime. /** @brief Registers layer constructor in runtime.
* @param type string, containing type name of the layer. * @param type string, containing type name of the layer.
* @param constuctorFunc pointer to the function of type LayerRegister::Constuctor, which creates the layer. * @param constructorFunc pointer to the function of type LayerRegister::Constructor, which creates the layer.
* @details This macros must be placed inside the function code. * @details This macros must be placed inside the function code.
*/ */
#define CV_DNN_REGISTER_LAYER_FUNC(type, constuctorFunc) \ #define CV_DNN_REGISTER_LAYER_FUNC(type, constructorFunc) \
cv::dnn::LayerFactory::registerLayer(#type, constuctorFunc); cv::dnn::LayerFactory::registerLayer(#type, constructorFunc);
/** @brief Registers layer class in runtime. /** @brief Registers layer class in runtime.
* @param type string, containing type name of the layer. * @param type string, containing type name of the layer.
@ -29,11 +29,11 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief Registers layer constructor on module load time. /** @brief Registers layer constructor on module load time.
* @param type string, containing type name of the layer. * @param type string, containing type name of the layer.
* @param constuctorFunc pointer to the function of type LayerRegister::Constuctor, which creates the layer. * @param constructorFunc pointer to the function of type LayerRegister::Constructor, which creates the layer.
* @details This macros must be placed outside the function code. * @details This macros must be placed outside the function code.
*/ */
#define CV_DNN_REGISTER_LAYER_FUNC_STATIC(type, constuctorFunc) \ #define CV_DNN_REGISTER_LAYER_FUNC_STATIC(type, constructorFunc) \
static cv::dnn::details::_LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, constuctorFunc); static cv::dnn::details::_LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, constructorFunc);
/** @brief Registers layer class on module load time. /** @brief Registers layer class on module load time.
* @param type string, containing type name of the layer. * @param type string, containing type name of the layer.
@ -59,10 +59,10 @@ class _LayerStaticRegisterer
String type; String type;
public: public:
_LayerStaticRegisterer(const String &layerType, LayerFactory::Constuctor layerConstuctor) _LayerStaticRegisterer(const String &layerType, LayerFactory::Constructor layerConstructor)
{ {
this->type = layerType; this->type = layerType;
LayerFactory::registerLayer(layerType, layerConstuctor); LayerFactory::registerLayer(layerType, layerConstructor);
} }
~_LayerStaticRegisterer() ~_LayerStaticRegisterer()

View File

@ -58,10 +58,10 @@ class CV_EXPORTS LayerFactory
public: public:
//! Each Layer class must provide this function to the factory //! Each Layer class must provide this function to the factory
typedef Ptr<Layer>(*Constuctor)(LayerParams &params); typedef Ptr<Layer>(*Constructor)(LayerParams &params);
//! Registers the layer class with typename @p type and specified @p constructor. Thread-safe. //! Registers the layer class with typename @p type and specified @p constructor. Thread-safe.
static void registerLayer(const String &type, Constuctor constructor); static void registerLayer(const String &type, Constructor constructor);
//! Unregisters registered layer with specified type name. Thread-safe. //! Unregisters registered layer with specified type name. Thread-safe.
static void unregisterLayer(const String &type); static void unregisterLayer(const String &type);

View File

@ -103,6 +103,19 @@ public:
ReadNetParamsFromBinaryBufferOrDie(dataModel, lenModel, &netBinary); ReadNetParamsFromBinaryBufferOrDie(dataModel, lenModel, &netBinary);
} }
void extractCustomParams(const google::protobuf::UnknownFieldSet& unknownFields, cv::dnn::LayerParams &params)
{
const int numFields = unknownFields.field_count();
for (int i = 0; i < numFields; ++i)
{
const google::protobuf::UnknownField& field = unknownFields.field(i);
CV_Assert(field.type() == google::protobuf::UnknownField::TYPE_GROUP);
std::string fieldName = field.group().field(0).length_delimited();
std::string fieldValue = field.group().field(1).length_delimited();
params.set(fieldName, fieldValue);
}
}
void addParam(const Message &msg, const FieldDescriptor *field, cv::dnn::LayerParams &params) void addParam(const Message &msg, const FieldDescriptor *field, cv::dnn::LayerParams &params)
{ {
const Reflection *refl = msg.GetReflection(); const Reflection *refl = msg.GetReflection();
@ -187,12 +200,15 @@ public:
if (!isInternal && !ends_with_param(fd->name())) if (!isInternal && !ends_with_param(fd->name()))
continue; continue;
const google::protobuf::UnknownFieldSet& unknownFields = msgRefl->GetUnknownFields(msg);
bool hasData = fd->is_required() || bool hasData = fd->is_required() ||
(fd->is_optional() && msgRefl->HasField(msg, fd)) || (fd->is_optional() && msgRefl->HasField(msg, fd)) ||
(fd->is_repeated() && msgRefl->FieldSize(msg, fd) > 0); (fd->is_repeated() && msgRefl->FieldSize(msg, fd) > 0) ||
!unknownFields.empty();
if (!hasData) if (!hasData)
continue; continue;
extractCustomParams(unknownFields, params);
if (fd->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) if (fd->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE)
{ {
if (fd->is_repeated()) //Extract only first item! if (fd->is_repeated()) //Extract only first item!
@ -258,7 +274,7 @@ public:
} }
} }
void extractBinaryLayerParms(const caffe::LayerParameter& layer, LayerParams& layerParams) void extractBinaryLayerParams(const caffe::LayerParameter& layer, LayerParams& layerParams)
{ {
const std::string &name = layer.name(); const std::string &name = layer.name();
@ -319,7 +335,7 @@ public:
LayerParams layerParams; LayerParams layerParams;
extractLayerParams(layer, layerParams); extractLayerParams(layer, layerParams);
extractBinaryLayerParms(layer, layerParams); extractBinaryLayerParams(layer, layerParams);
int repetitions = layerCounter[name]++; int repetitions = layerCounter[name]++;
if (repetitions) if (repetitions)

View File

@ -1120,7 +1120,7 @@ bool ReadProtoFromTextFile(const char* filename, Message* proto) {
std::ifstream fs(filename, std::ifstream::in); std::ifstream fs(filename, std::ifstream::in);
CHECK(fs.is_open()) << "Can't open \"" << filename << "\""; CHECK(fs.is_open()) << "Can't open \"" << filename << "\"";
IstreamInputStream input(&fs); IstreamInputStream input(&fs);
return google::protobuf::TextFormat::Parse(&input, proto); return google::protobuf::TextFormat::Parser(true).Parse(&input, proto);
} }
bool ReadProtoFromBinaryFile(const char* filename, Message* proto) { bool ReadProtoFromBinaryFile(const char* filename, Message* proto) {

View File

@ -1941,7 +1941,7 @@ Net::Net() : impl(new Net::Impl)
Net Net::readFromModelOptimizer(const String& xml, const String& bin) Net Net::readFromModelOptimizer(const String& xml, const String& bin)
{ {
#ifndef HAVE_INF_ENGINE #ifndef HAVE_INF_ENGINE
CV_ErrorNoReturn(Error::StsError, "Build OpenCV with Inference Engine to enable loading models from Model Optimizer."); CV_Error(Error::StsError, "Build OpenCV with Inference Engine to enable loading models from Model Optimizer.");
#else #else
InferenceEngine::CNNNetReader reader; InferenceEngine::CNNNetReader reader;
reader.ReadNetwork(xml); reader.ReadNetwork(xml);
@ -2790,7 +2790,7 @@ static Mutex& getLayerFactoryMutex()
return *instance; return *instance;
} }
typedef std::map<String, LayerFactory::Constuctor> LayerFactory_Impl; typedef std::map<String, std::vector<LayerFactory::Constructor> > LayerFactory_Impl;
static LayerFactory_Impl& getLayerFactoryImpl_() static LayerFactory_Impl& getLayerFactoryImpl_()
{ {
@ -2813,21 +2813,22 @@ static LayerFactory_Impl& getLayerFactoryImpl()
return *instance; return *instance;
} }
void LayerFactory::registerLayer(const String &type, Constuctor constructor) void LayerFactory::registerLayer(const String &type, Constructor constructor)
{ {
CV_TRACE_FUNCTION(); CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(type, "type", type.c_str()); CV_TRACE_ARG_VALUE(type, "type", type.c_str());
cv::AutoLock lock(getLayerFactoryMutex()); cv::AutoLock lock(getLayerFactoryMutex());
String type_ = type.toLowerCase(); String type_ = type.toLowerCase();
LayerFactory_Impl::const_iterator it = getLayerFactoryImpl().find(type_); LayerFactory_Impl::iterator it = getLayerFactoryImpl().find(type_);
if (it != getLayerFactoryImpl().end() && it->second != constructor) if (it != getLayerFactoryImpl().end())
{ {
CV_Error(cv::Error::StsBadArg, "Layer \"" + type_ + "\" already was registered"); if (it->second.back() == constructor)
CV_Error(cv::Error::StsBadArg, "Layer \"" + type_ + "\" already was registered");
it->second.push_back(constructor);
} }
getLayerFactoryImpl().insert(std::make_pair(type_, std::vector<Constructor>(1, constructor)));
getLayerFactoryImpl().insert(std::make_pair(type_, constructor));
} }
void LayerFactory::unregisterLayer(const String &type) void LayerFactory::unregisterLayer(const String &type)
@ -2837,7 +2838,15 @@ void LayerFactory::unregisterLayer(const String &type)
cv::AutoLock lock(getLayerFactoryMutex()); cv::AutoLock lock(getLayerFactoryMutex());
String type_ = type.toLowerCase(); String type_ = type.toLowerCase();
getLayerFactoryImpl().erase(type_);
LayerFactory_Impl::iterator it = getLayerFactoryImpl().find(type_);
if (it != getLayerFactoryImpl().end())
{
if (it->second.size() > 1)
it->second.pop_back();
else
getLayerFactoryImpl().erase(it);
}
} }
Ptr<Layer> LayerFactory::createLayerInstance(const String &type, LayerParams& params) Ptr<Layer> LayerFactory::createLayerInstance(const String &type, LayerParams& params)
@ -2851,7 +2860,8 @@ Ptr<Layer> LayerFactory::createLayerInstance(const String &type, LayerParams& pa
if (it != getLayerFactoryImpl().end()) if (it != getLayerFactoryImpl().end())
{ {
return it->second(params); CV_Assert(!it->second.empty());
return it->second.back()(params);
} }
else else
{ {
@ -2920,7 +2930,7 @@ Net readNet(const String& _model, const String& _config, const String& _framewor
std::swap(model, config); std::swap(model, config);
return readNetFromModelOptimizer(config, model); return readNetFromModelOptimizer(config, model);
} }
CV_ErrorNoReturn(Error::StsError, "Cannot determine an origin framework of files: " + CV_Error(Error::StsError, "Cannot determine an origin framework of files: " +
model + (config.empty() ? "" : ", " + config)); model + (config.empty() ? "" : ", " + config));
} }

View File

@ -151,7 +151,7 @@ public:
message += " layer parameter does not contain "; message += " layer parameter does not contain ";
message += parameterName; message += parameterName;
message += " parameter."; message += " parameter.";
CV_ErrorNoReturn(Error::StsBadArg, message); CV_Error(Error::StsBadArg, message);
} }
else else
{ {
@ -471,12 +471,12 @@ public:
{ {
int label = it->first; int label = it->first;
if (confidenceScores.rows <= label) if (confidenceScores.rows <= label)
CV_ErrorNoReturn_(cv::Error::StsError, ("Could not find confidence predictions for label %d", label)); CV_Error_(cv::Error::StsError, ("Could not find confidence predictions for label %d", label));
const std::vector<float>& scores = confidenceScores.row(label); const std::vector<float>& scores = confidenceScores.row(label);
int locLabel = _shareLocation ? -1 : label; int locLabel = _shareLocation ? -1 : label;
LabelBBox::const_iterator label_bboxes = decodeBBoxes.find(locLabel); LabelBBox::const_iterator label_bboxes = decodeBBoxes.find(locLabel);
if (label_bboxes == decodeBBoxes.end()) if (label_bboxes == decodeBBoxes.end())
CV_ErrorNoReturn_(cv::Error::StsError, ("Could not find location predictions for label %d", locLabel)); CV_Error_(cv::Error::StsError, ("Could not find location predictions for label %d", locLabel));
const std::vector<int>& indices = it->second; const std::vector<int>& indices = it->second;
for (size_t j = 0; j < indices.size(); ++j, ++count) for (size_t j = 0; j < indices.size(); ++j, ++count)
@ -507,14 +507,14 @@ public:
if (c == _backgroundLabelId) if (c == _backgroundLabelId)
continue; // Ignore background class. continue; // Ignore background class.
if (c >= confidenceScores.rows) if (c >= confidenceScores.rows)
CV_ErrorNoReturn_(cv::Error::StsError, ("Could not find confidence predictions for label %d", c)); CV_Error_(cv::Error::StsError, ("Could not find confidence predictions for label %d", c));
const std::vector<float> scores = confidenceScores.row(c); const std::vector<float> scores = confidenceScores.row(c);
int label = _shareLocation ? -1 : c; int label = _shareLocation ? -1 : c;
LabelBBox::const_iterator label_bboxes = decodeBBoxes.find(label); LabelBBox::const_iterator label_bboxes = decodeBBoxes.find(label);
if (label_bboxes == decodeBBoxes.end()) if (label_bboxes == decodeBBoxes.end())
CV_ErrorNoReturn_(cv::Error::StsError, ("Could not find location predictions for label %d", label)); CV_Error_(cv::Error::StsError, ("Could not find location predictions for label %d", label));
if (_bboxesNormalized) if (_bboxesNormalized)
NMSFast_(label_bboxes->second, scores, _confidenceThreshold, _nmsThreshold, 1.0, _topK, NMSFast_(label_bboxes->second, scores, _confidenceThreshold, _nmsThreshold, 1.0, _topK,
indices[c], util::caffe_norm_box_overlap); indices[c], util::caffe_norm_box_overlap);
@ -532,7 +532,7 @@ public:
int label = it->first; int label = it->first;
const std::vector<int>& labelIndices = it->second; const std::vector<int>& labelIndices = it->second;
if (label >= confidenceScores.rows) if (label >= confidenceScores.rows)
CV_ErrorNoReturn_(cv::Error::StsError, ("Could not find location predictions for label %d", label)); CV_Error_(cv::Error::StsError, ("Could not find location predictions for label %d", label));
const std::vector<float>& scores = confidenceScores.row(label); const std::vector<float>& scores = confidenceScores.row(label);
for (size_t j = 0; j < labelIndices.size(); ++j) for (size_t j = 0; j < labelIndices.size(); ++j)
{ {
@ -645,7 +645,7 @@ public:
decode_bbox.ymax = decode_bbox_center_y + decode_bbox_height * .5; decode_bbox.ymax = decode_bbox_center_y + decode_bbox_height * .5;
} }
else else
CV_ErrorNoReturn(Error::StsBadArg, "Unknown type."); CV_Error(Error::StsBadArg, "Unknown type.");
if (clip_bbox) if (clip_bbox)
{ {
@ -714,7 +714,7 @@ public:
continue; // Ignore background class. continue; // Ignore background class.
LabelBBox::const_iterator label_loc_preds = loc_preds.find(label); LabelBBox::const_iterator label_loc_preds = loc_preds.find(label);
if (label_loc_preds == loc_preds.end()) if (label_loc_preds == loc_preds.end())
CV_ErrorNoReturn_(cv::Error::StsError, ("Could not find location predictions for label %d", label)); CV_Error_(cv::Error::StsError, ("Could not find location predictions for label %d", label));
DecodeBBoxes(prior_bboxes, prior_variances, DecodeBBoxes(prior_bboxes, prior_variances,
code_type, variance_encoded_in_target, clip, clip_bounds, code_type, variance_encoded_in_target, clip, clip_bounds,
normalized_bbox, label_loc_preds->second, decode_bboxes[label]); normalized_bbox, label_loc_preds->second, decode_bboxes[label]);

View File

@ -89,7 +89,7 @@ public:
if (net.node(i).name() == name) if (net.node(i).name() == name)
return net.node(i); return net.node(i);
} }
CV_ErrorNoReturn(Error::StsParseError, "Input node with name " + name + " not found"); CV_Error(Error::StsParseError, "Input node with name " + name + " not found");
} }
// Match TensorFlow subgraph starting from <nodeId> with a set of nodes to be fused. // Match TensorFlow subgraph starting from <nodeId> with a set of nodes to be fused.

View File

@ -1564,8 +1564,44 @@ void TFImporter::populateNet(Net dstNet)
} }
else else
{ {
printLayerAttr(layer); // Importer does not know how to map this TensorFlow's operation onto OpenCV's layer.
CV_Error_(Error::StsError, ("Unknown layer type %s in op %s", type.c_str(), name.c_str())); // However we create a layer with the same type and rely that user defined a custom layer.
// All the attributes are added to LayerParams.
google::protobuf::Map<std::string, tensorflow::AttrValue> attr = layer.attr();
for (google::protobuf::Map<std::string, tensorflow::AttrValue>::const_iterator ai = attr.begin();
ai != attr.end(); ++ai)
{
if (ai->second.value_case() == tensorflow::AttrValue::kS) // string
layerParams.set(ai->first, ai->second.s());
if (ai->second.value_case() == tensorflow::AttrValue::kI) // int64
layerParams.set(ai->first, ai->second.i());
if (ai->second.value_case() == tensorflow::AttrValue::kF) // float
layerParams.set(ai->first, ai->second.f());
if (ai->second.value_case() == tensorflow::AttrValue::kB) // bool
layerParams.set(ai->first, ai->second.b());
}
// All the Const input nodes are added to layer's blobs.
std::vector<std::string> inputsNames;
for (int i = 0; i < layer.input_size(); ++i)
{
// Check if input is a Const node.
if (value_id.find(layer.input(i)) != value_id.end())
{
Mat blob = getTensorContent(getConstBlob(layer, value_id, i));
layerParams.blobs.push_back(blob);
}
else
inputsNames.push_back(layer.input(i));
}
int id = dstNet.addLayer(name, type, layerParams);
layer_id[name] = id;
for (int i = 0; i < inputsNames.size(); ++i)
{
connect(layer_id, dstNet, parsePin(inputsNames[i]), id, i);
}
} }
} }
} }

View File

@ -940,7 +940,21 @@ struct TorchImporter
} }
else else
{ {
CV_Error(Error::StsNotImplemented, "Unknown nn class \"" + className + "\""); // Importer does not know how to map Torch's layer type to an OpenCV's one.
// However we parse all the parameters to let user create a custom layer.
readTorchTable(scalarParams, tensorParams);
for (std::map<String, DictValue>::const_iterator it = scalarParams.begin();
it != scalarParams.end(); ++it)
{
layerParams.set(it->first, it->second);
}
for (std::map<String, std::pair<int, Mat> >::iterator it = tensorParams.begin();
it != tensorParams.end(); ++it)
{
layerParams.blobs.push_back(it->second.second);
}
newModule->apiType = nnName;
curModule->modules.push_back(newModule);
} }
} }
else else

View File

@ -44,7 +44,7 @@
#include "npy_blob.hpp" #include "npy_blob.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/dnn/all_layers.hpp> #include <opencv2/dnn/all_layers.hpp>
#include <opencv2/ts/ocl_test.hpp> #include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
namespace opencv_test { namespace { namespace opencv_test { namespace {
@ -117,94 +117,50 @@ void testLayerUsingCaffeModels(String basename, int targetId = DNN_TARGET_CPU,
normAssert(ref, out); normAssert(ref, out);
} }
TEST(Layer_Test_Softmax, Accuracy) typedef testing::TestWithParam<DNNTarget> Test_Caffe_layers;
TEST_P(Test_Caffe_layers, Softmax)
{ {
testLayerUsingCaffeModels("layer_softmax"); testLayerUsingCaffeModels("layer_softmax", GetParam());
} }
OCL_TEST(Layer_Test_Softmax, Accuracy) TEST_P(Test_Caffe_layers, LRN_spatial)
{ {
testLayerUsingCaffeModels("layer_softmax", DNN_TARGET_OPENCL); testLayerUsingCaffeModels("layer_lrn_spatial", GetParam());
} }
TEST(Layer_Test_LRN_spatial, Accuracy) TEST_P(Test_Caffe_layers, LRN_channels)
{ {
testLayerUsingCaffeModels("layer_lrn_spatial"); testLayerUsingCaffeModels("layer_lrn_channels", GetParam());
} }
OCL_TEST(Layer_Test_LRN_spatial, Accuracy) TEST_P(Test_Caffe_layers, Convolution)
{ {
testLayerUsingCaffeModels("layer_lrn_spatial", DNN_TARGET_OPENCL); testLayerUsingCaffeModels("layer_convolution", GetParam(), true);
} }
TEST(Layer_Test_LRN_channels, Accuracy) TEST_P(Test_Caffe_layers, DeConvolution)
{ {
testLayerUsingCaffeModels("layer_lrn_channels"); testLayerUsingCaffeModels("layer_deconvolution", GetParam(), true, false);
} }
OCL_TEST(Layer_Test_LRN_channels, Accuracy) TEST_P(Test_Caffe_layers, InnerProduct)
{ {
testLayerUsingCaffeModels("layer_lrn_channels", DNN_TARGET_OPENCL); testLayerUsingCaffeModels("layer_inner_product", GetParam(), true);
} }
TEST(Layer_Test_Convolution, Accuracy) TEST_P(Test_Caffe_layers, Pooling_max)
{ {
testLayerUsingCaffeModels("layer_convolution", DNN_TARGET_CPU, true); testLayerUsingCaffeModels("layer_pooling_max", GetParam());
} }
OCL_TEST(Layer_Test_Convolution, Accuracy) TEST_P(Test_Caffe_layers, Pooling_ave)
{ {
testLayerUsingCaffeModels("layer_convolution", DNN_TARGET_OPENCL, true); testLayerUsingCaffeModels("layer_pooling_ave", GetParam());
} }
TEST(Layer_Test_DeConvolution, Accuracy) TEST_P(Test_Caffe_layers, MVN)
{ {
testLayerUsingCaffeModels("layer_deconvolution", DNN_TARGET_CPU, true, false); testLayerUsingCaffeModels("layer_mvn", GetParam());
}
OCL_TEST(Layer_Test_DeConvolution, Accuracy)
{
testLayerUsingCaffeModels("layer_deconvolution", DNN_TARGET_OPENCL, true, false);
}
TEST(Layer_Test_InnerProduct, Accuracy)
{
testLayerUsingCaffeModels("layer_inner_product", DNN_TARGET_CPU, true);
}
OCL_TEST(Layer_Test_InnerProduct, Accuracy)
{
testLayerUsingCaffeModels("layer_inner_product", DNN_TARGET_OPENCL, true);
}
TEST(Layer_Test_Pooling_max, Accuracy)
{
testLayerUsingCaffeModels("layer_pooling_max");
}
OCL_TEST(Layer_Test_Pooling_max, Accuracy)
{
testLayerUsingCaffeModels("layer_pooling_max", DNN_TARGET_OPENCL);
}
TEST(Layer_Test_Pooling_ave, Accuracy)
{
testLayerUsingCaffeModels("layer_pooling_ave");
}
OCL_TEST(Layer_Test_Pooling_ave, Accuracy)
{
testLayerUsingCaffeModels("layer_pooling_ave", DNN_TARGET_OPENCL);
}
TEST(Layer_Test_MVN, Accuracy)
{
testLayerUsingCaffeModels("layer_mvn");
}
OCL_TEST(Layer_Test_MVN, Accuracy)
{
testLayerUsingCaffeModels("layer_mvn", DNN_TARGET_OPENCL);
} }
void testReshape(const MatShape& inputShape, const MatShape& targetShape, void testReshape(const MatShape& inputShape, const MatShape& targetShape,
@ -257,14 +213,9 @@ TEST(Layer_Test_BatchNorm, local_stats)
testLayerUsingCaffeModels("layer_batch_norm_local_stats", DNN_TARGET_CPU, true, false); testLayerUsingCaffeModels("layer_batch_norm_local_stats", DNN_TARGET_CPU, true, false);
} }
TEST(Layer_Test_ReLU, Accuracy) TEST_P(Test_Caffe_layers, ReLU)
{ {
testLayerUsingCaffeModels("layer_relu"); testLayerUsingCaffeModels("layer_relu", GetParam());
}
OCL_TEST(Layer_Test_ReLU, Accuracy)
{
testLayerUsingCaffeModels("layer_relu", DNN_TARGET_OPENCL);
} }
TEST(Layer_Test_Dropout, Accuracy) TEST(Layer_Test_Dropout, Accuracy)
@ -272,14 +223,9 @@ TEST(Layer_Test_Dropout, Accuracy)
testLayerUsingCaffeModels("layer_dropout"); testLayerUsingCaffeModels("layer_dropout");
} }
TEST(Layer_Test_Concat, Accuracy) TEST_P(Test_Caffe_layers, Concat)
{ {
testLayerUsingCaffeModels("layer_concat"); testLayerUsingCaffeModels("layer_concat", GetParam());
}
OCL_TEST(Layer_Test_Concat, Accuracy)
{
testLayerUsingCaffeModels("layer_concat", DNN_TARGET_OPENCL);
} }
TEST(Layer_Test_Fused_Concat, Accuracy) TEST(Layer_Test_Fused_Concat, Accuracy)
@ -325,26 +271,16 @@ TEST(Layer_Test_Fused_Concat, Accuracy)
testLayerUsingCaffeModels("layer_concat_shared_input", DNN_TARGET_CPU, true, false); testLayerUsingCaffeModels("layer_concat_shared_input", DNN_TARGET_CPU, true, false);
} }
TEST(Layer_Test_Eltwise, Accuracy) TEST_P(Test_Caffe_layers, Eltwise)
{ {
testLayerUsingCaffeModels("layer_eltwise"); testLayerUsingCaffeModels("layer_eltwise", GetParam());
} }
OCL_TEST(Layer_Test_Eltwise, Accuracy) TEST_P(Test_Caffe_layers, PReLU)
{ {
testLayerUsingCaffeModels("layer_eltwise", DNN_TARGET_OPENCL); int targetId = GetParam();
} testLayerUsingCaffeModels("layer_prelu", targetId, true);
testLayerUsingCaffeModels("layer_prelu_fc", targetId, true, false);
TEST(Layer_Test_PReLU, Accuracy)
{
testLayerUsingCaffeModels("layer_prelu", DNN_TARGET_CPU, true);
testLayerUsingCaffeModels("layer_prelu_fc", DNN_TARGET_CPU, true, false);
}
OCL_TEST(Layer_Test_PReLU, Accuracy)
{
testLayerUsingCaffeModels("layer_prelu", DNN_TARGET_OPENCL, true);
testLayerUsingCaffeModels("layer_prelu_fc", DNN_TARGET_OPENCL, true, false);
} }
//template<typename XMat> //template<typename XMat>
@ -385,14 +321,9 @@ static void test_Reshape_Split_Slice_layers(int targetId)
normAssert(input, output); normAssert(input, output);
} }
TEST(Layer_Test_Reshape_Split_Slice, Accuracy) TEST_P(Test_Caffe_layers, Reshape_Split_Slice)
{ {
test_Reshape_Split_Slice_layers(DNN_TARGET_CPU); test_Reshape_Split_Slice_layers(GetParam());
}
OCL_TEST(Layer_Test_Reshape_Split_Slice, Accuracy)
{
test_Reshape_Split_Slice_layers(DNN_TARGET_OPENCL);
} }
TEST(Layer_Conv_Elu, Accuracy) TEST(Layer_Conv_Elu, Accuracy)
@ -602,7 +533,6 @@ TEST(Layer_Test_ROIPooling, Accuracy)
normAssert(out, ref); normAssert(out, ref);
} }
typedef testing::TestWithParam<DNNTarget> Test_Caffe_layers;
TEST_P(Test_Caffe_layers, FasterRCNN_Proposal) TEST_P(Test_Caffe_layers, FasterRCNN_Proposal)
{ {
Net net = readNetFromCaffe(_tf("net_faster_rcnn_proposal.prototxt")); Net net = readNetFromCaffe(_tf("net_faster_rcnn_proposal.prototxt"));
@ -906,4 +836,104 @@ TEST(Test_DLDT, two_inputs)
} }
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE
// Test a custom layer.
class InterpLayer CV_FINAL : public Layer
{
public:
InterpLayer(const LayerParams &params) : Layer(params)
{
zoomFactor = params.get<int>("zoom_factor", 0);
outWidth = params.get<int>("width", 0);
outHeight = params.get<int>("height", 0);
}
static Ptr<InterpLayer> create(LayerParams& params)
{
return Ptr<InterpLayer>(new InterpLayer(params));
}
virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
const int requiredOutputs,
std::vector<std::vector<int> > &outputs,
std::vector<std::vector<int> > &internals) const CV_OVERRIDE
{
const int batchSize = inputs[0][0];
const int numChannels = inputs[0][1];
const int inpHeight = inputs[0][2];
const int inpWidth = inputs[0][3];
std::vector<int> outShape(4);
outShape[0] = batchSize;
outShape[1] = numChannels;
outShape[2] = outHeight != 0 ? outHeight : (inpHeight + (inpHeight - 1) * (zoomFactor - 1));
outShape[3] = outWidth != 0 ? outWidth : (inpWidth + (inpWidth - 1) * (zoomFactor - 1));
outputs.assign(1, outShape);
return false;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
if (!outWidth && !outHeight)
{
outHeight = outputs[0].size[2];
outWidth = outputs[0].size[3];
}
}
// Implementation of this custom layer is based on https://github.com/cdmh/deeplab-public/blob/master/src/caffe/layers/interp_layer.cpp
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
{
Mat& inp = *inputs[0];
Mat& out = outputs[0];
const float* inpData = (float*)inp.data;
float* outData = (float*)out.data;
const int batchSize = inp.size[0];
const int numChannels = inp.size[1];
const int inpHeight = inp.size[2];
const int inpWidth = inp.size[3];
const float rheight = (outHeight > 1) ? static_cast<float>(inpHeight - 1) / (outHeight - 1) : 0.f;
const float rwidth = (outWidth > 1) ? static_cast<float>(inpWidth - 1) / (outWidth - 1) : 0.f;
for (int h2 = 0; h2 < outHeight; ++h2)
{
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < inpHeight - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
for (int w2 = 0; w2 < outWidth; ++w2)
{
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < inpWidth - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
const float* pos1 = inpData + h1 * inpWidth + w1;
float* pos2 = outData + h2 * outWidth + w2;
for (int c = 0; c < batchSize * numChannels; ++c)
{
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) +
h1lambda * (w0lambda * pos1[h1p * inpWidth] + w1lambda * pos1[h1p * inpWidth + w1p]);
pos1 += inpWidth * inpHeight;
pos2 += outWidth * outHeight;
}
}
}
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
private:
int outWidth, outHeight, zoomFactor;
};
TEST(Layer_Test_Interp, Accuracy)
{
CV_DNN_REGISTER_LAYER_CLASS(Interp, InterpLayer);
testLayerUsingCaffeModels("layer_interp", DNN_TARGET_CPU, false, false);
LayerFactory::unregisterLayer("Interp");
}
}} // namespace }} // namespace

View File

@ -7,6 +7,8 @@
#include "test_precomp.hpp" #include "test_precomp.hpp"
#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
namespace opencv_test { namespace { namespace opencv_test { namespace {
TEST(blobFromImage_4ch, Regression) TEST(blobFromImage_4ch, Regression)
@ -75,4 +77,64 @@ TEST(readNet, Regression)
EXPECT_FALSE(net.empty()); EXPECT_FALSE(net.empty());
} }
class FirstCustomLayer CV_FINAL : public Layer
{
public:
FirstCustomLayer(const LayerParams &params) : Layer(params) {}
static Ptr<Layer> create(LayerParams& params)
{
return Ptr<Layer>(new FirstCustomLayer(params));
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
{
outputs[0].setTo(1);
}
};
class SecondCustomLayer CV_FINAL : public Layer
{
public:
SecondCustomLayer(const LayerParams &params) : Layer(params) {}
static Ptr<Layer> create(LayerParams& params)
{
return Ptr<Layer>(new SecondCustomLayer(params));
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
{
outputs[0].setTo(2);
}
};
TEST(LayerFactory, custom_layers)
{
LayerParams lp;
lp.name = "name";
lp.type = "CustomType";
Mat inp(1, 1, CV_32FC1);
for (int i = 0; i < 3; ++i)
{
if (i == 0) { CV_DNN_REGISTER_LAYER_CLASS(CustomType, FirstCustomLayer); }
else if (i == 1) { CV_DNN_REGISTER_LAYER_CLASS(CustomType, SecondCustomLayer); }
else if (i == 2) { LayerFactory::unregisterLayer("CustomType"); }
Net net;
net.addLayerToPrev(lp.name, lp.type, lp);
net.setInput(inp);
Mat output = net.forward();
if (i == 0) EXPECT_EQ(output.at<float>(0), 1);
else if (i == 1) EXPECT_EQ(output.at<float>(0), 2);
else if (i == 2) EXPECT_EQ(output.at<float>(0), 1);
}
LayerFactory::unregisterLayer("CustomType");
}
}} // namespace }} // namespace

View File

@ -12,6 +12,8 @@ Test for Tensorflow models loading
#include "test_precomp.hpp" #include "test_precomp.hpp"
#include "npy_blob.hpp" #include "npy_blob.hpp"
#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
namespace opencv_test namespace opencv_test
{ {
@ -364,4 +366,95 @@ TEST(Test_TensorFlow, memory_read)
runTensorFlowNet("batch_norm_text", DNN_TARGET_CPU, true, l1, lInf, true); runTensorFlowNet("batch_norm_text", DNN_TARGET_CPU, true, l1, lInf, true);
} }
// Test a custom layer.
class ResizeBilinearLayer CV_FINAL : public Layer
{
public:
ResizeBilinearLayer(const LayerParams &params) : Layer(params)
{
CV_Assert(!params.get<bool>("align_corners", false));
CV_Assert(blobs.size() == 1, blobs[0].type() == CV_32SC1);
outHeight = blobs[0].at<int>(0, 0);
outWidth = blobs[0].at<int>(0, 1);
}
static Ptr<Layer> create(LayerParams& params)
{
return Ptr<Layer>(new ResizeBilinearLayer(params));
}
virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
const int requiredOutputs,
std::vector<std::vector<int> > &outputs,
std::vector<std::vector<int> > &internals) const CV_OVERRIDE
{
std::vector<int> outShape(4);
outShape[0] = inputs[0][0]; // batch size
outShape[1] = inputs[0][1]; // number of channels
outShape[2] = outHeight;
outShape[3] = outWidth;
outputs.assign(1, outShape);
return false;
}
// This implementation is based on a reference implementation from
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
Mat& inp = *inputs[0];
Mat& out = outputs[0];
const float* inpData = (float*)inp.data;
float* outData = (float*)out.data;
const int batchSize = inp.size[0];
const int numChannels = inp.size[1];
const int inpHeight = inp.size[2];
const int inpWidth = inp.size[3];
float heightScale = static_cast<float>(inpHeight) / outHeight;
float widthScale = static_cast<float>(inpWidth) / outWidth;
for (int b = 0; b < batchSize; ++b)
{
for (int y = 0; y < outHeight; ++y)
{
float input_y = y * heightScale;
int y0 = static_cast<int>(std::floor(input_y));
int y1 = std::min(y0 + 1, inpHeight - 1);
for (int x = 0; x < outWidth; ++x)
{
float input_x = x * widthScale;
int x0 = static_cast<int>(std::floor(input_x));
int x1 = std::min(x0 + 1, inpWidth - 1);
for (int c = 0; c < numChannels; ++c)
{
float interpolation =
inpData[offset(inp.size, c, x0, y0, b)] * (1 - (input_y - y0)) * (1 - (input_x - x0)) +
inpData[offset(inp.size, c, x0, y1, b)] * (input_y - y0) * (1 - (input_x - x0)) +
inpData[offset(inp.size, c, x1, y0, b)] * (1 - (input_y - y0)) * (input_x - x0) +
inpData[offset(inp.size, c, x1, y1, b)] * (input_y - y0) * (input_x - x0);
outData[offset(out.size, c, x, y, b)] = interpolation;
}
}
}
}
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
private:
static inline int offset(const MatSize& size, int c, int x, int y, int b)
{
return x + size[3] * (y + size[2] * (c + size[1] * b));
}
int outWidth, outHeight;
};
TEST(Test_TensorFlow, resize_bilinear)
{
CV_DNN_REGISTER_LAYER_CLASS(ResizeBilinear, ResizeBilinearLayer);
runTensorFlowNet("resize_bilinear");
LayerFactory::unregisterLayer("ResizeBilinear");
}
} }

View File

@ -42,6 +42,7 @@
#include "test_precomp.hpp" #include "test_precomp.hpp"
#include "npy_blob.hpp" #include "npy_blob.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
namespace opencv_test namespace opencv_test
{ {
@ -325,4 +326,62 @@ TEST(Torch_Importer, net_residual)
runTorchNet("net_residual", DNN_TARGET_CPU, "", false, true); runTorchNet("net_residual", DNN_TARGET_CPU, "", false, true);
} }
// Test a custom layer
// https://github.com/torch/nn/blob/master/doc/convolution.md#nn.SpatialUpSamplingNearest
class SpatialUpSamplingNearestLayer CV_FINAL : public Layer
{
public:
SpatialUpSamplingNearestLayer(const LayerParams &params) : Layer(params)
{
scale = params.get<int>("scale_factor");
}
static Ptr<Layer> create(LayerParams& params)
{
return Ptr<Layer>(new SpatialUpSamplingNearestLayer(params));
}
virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
const int requiredOutputs,
std::vector<std::vector<int> > &outputs,
std::vector<std::vector<int> > &internals) const CV_OVERRIDE
{
std::vector<int> outShape(4);
outShape[0] = inputs[0][0]; // batch size
outShape[1] = inputs[0][1]; // number of channels
outShape[2] = scale * inputs[0][2];
outShape[3] = scale * inputs[0][3];
outputs.assign(1, outShape);
return false;
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
Mat& inp = *inputs[0];
Mat& out = outputs[0];
const int outHeight = out.size[2];
const int outWidth = out.size[3];
for (size_t n = 0; n < inputs[0]->size[0]; ++n)
{
for (size_t ch = 0; ch < inputs[0]->size[1]; ++ch)
{
resize(getPlane(inp, n, ch), getPlane(out, n, ch),
Size(outWidth, outHeight), 0, 0, INTER_NEAREST);
}
}
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
private:
int scale;
};
TEST(Torch_Importer, upsampling_nearest)
{
CV_DNN_REGISTER_LAYER_CLASS(SpatialUpSamplingNearest, SpatialUpSamplingNearestLayer);
runTorchNet("net_spatial_upsampling_nearest", DNN_TARGET_CPU, "", false, true);
LayerFactory::unregisterLayer("SpatialUpSamplingNearest");
}
} }

View File

@ -1360,11 +1360,13 @@ Ptr<DescriptorMatcher> FlannBasedMatcher::clone( bool emptyTrainData ) const
{ {
CV_Error( Error::StsNotImplemented, "deep clone functionality is not implemented, because " CV_Error( Error::StsNotImplemented, "deep clone functionality is not implemented, because "
"Flann::Index has not copy constructor or clone method "); "Flann::Index has not copy constructor or clone method ");
#if 0
//matcher->flannIndex; //matcher->flannIndex;
matcher->addedDescCount = addedDescCount; matcher->addedDescCount = addedDescCount;
matcher->mergedDescriptors = DescriptorCollection( mergedDescriptors ); matcher->mergedDescriptors = DescriptorCollection( mergedDescriptors );
std::transform( trainDescCollection.begin(), trainDescCollection.end(), std::transform( trainDescCollection.begin(), trainDescCollection.end(),
matcher->trainDescCollection.begin(), clone_op ); matcher->trainDescCollection.begin(), clone_op );
#endif
} }
return matcher; return matcher;
} }

View File

@ -530,7 +530,6 @@ static const char* NO_QT_ERR_MSG = "The library is compiled without QT support";
cv::QtFont cv::fontQt(const String&, int, Scalar, int, int, int) cv::QtFont cv::fontQt(const String&, int, Scalar, int, int, int)
{ {
CV_Error(CV_StsNotImplemented, NO_QT_ERR_MSG); CV_Error(CV_StsNotImplemented, NO_QT_ERR_MSG);
return QtFont();
} }
void cv::addText( const Mat&, const String&, Point, const QtFont&) void cv::addText( const Mat&, const String&, Point, const QtFont&)
@ -556,7 +555,6 @@ void cv::displayOverlay(const String&, const String&, int )
int cv::startLoop(int (*)(int argc, char *argv[]), int , char**) int cv::startLoop(int (*)(int argc, char *argv[]), int , char**)
{ {
CV_Error(CV_StsNotImplemented, NO_QT_ERR_MSG); CV_Error(CV_StsNotImplemented, NO_QT_ERR_MSG);
return 0;
} }
void cv::stopLoop() void cv::stopLoop()
@ -577,7 +575,6 @@ void cv::loadWindowParameters(const String&)
int cv::createButton(const String&, ButtonCallback, void*, int , bool ) int cv::createButton(const String&, ButtonCallback, void*, int , bool )
{ {
CV_Error(CV_StsNotImplemented, NO_QT_ERR_MSG); CV_Error(CV_StsNotImplemented, NO_QT_ERR_MSG);
return 0;
} }
#endif #endif
@ -606,17 +603,16 @@ void cv::setWindowTitle(const String&, const String&)
} }
#define CV_NO_GUI_ERROR(funcname) \ #define CV_NO_GUI_ERROR(funcname) \
cvError( CV_StsError, funcname, \ cv::errorNoReturn(cv::Error::StsError, \
"The function is not implemented. " \ "The function is not implemented. " \
"Rebuild the library with Windows, GTK+ 2.x or Carbon support. "\ "Rebuild the library with Windows, GTK+ 2.x or Carbon support. "\
"If you are on Ubuntu or Debian, install libgtk2.0-dev and pkg-config, then re-run cmake or configure script", \ "If you are on Ubuntu or Debian, install libgtk2.0-dev and pkg-config, then re-run cmake or configure script", \
__FILE__, __LINE__ ) funcname, __FILE__, __LINE__)
CV_IMPL int cvNamedWindow( const char*, int ) CV_IMPL int cvNamedWindow( const char*, int )
{ {
CV_NO_GUI_ERROR("cvNamedWindow"); CV_NO_GUI_ERROR("cvNamedWindow");
return -1;
} }
CV_IMPL void cvDestroyWindow( const char* ) CV_IMPL void cvDestroyWindow( const char* )
@ -651,7 +647,6 @@ cvCreateTrackbar( const char*, const char*,
int*, int, CvTrackbarCallback ) int*, int, CvTrackbarCallback )
{ {
CV_NO_GUI_ERROR( "cvCreateTrackbar" ); CV_NO_GUI_ERROR( "cvCreateTrackbar" );
return -1;
} }
CV_IMPL int CV_IMPL int
@ -660,7 +655,6 @@ cvCreateTrackbar2( const char* /*trackbar_name*/, const char* /*window_name*/,
void* /*userdata*/ ) void* /*userdata*/ )
{ {
CV_NO_GUI_ERROR( "cvCreateTrackbar2" ); CV_NO_GUI_ERROR( "cvCreateTrackbar2" );
return -1;
} }
CV_IMPL void CV_IMPL void
@ -672,7 +666,6 @@ cvSetMouseCallback( const char*, CvMouseCallback, void* )
CV_IMPL int cvGetTrackbarPos( const char*, const char* ) CV_IMPL int cvGetTrackbarPos( const char*, const char* )
{ {
CV_NO_GUI_ERROR( "cvGetTrackbarPos" ); CV_NO_GUI_ERROR( "cvGetTrackbarPos" );
return -1;
} }
CV_IMPL void cvSetTrackbarPos( const char*, const char*, int ) CV_IMPL void cvSetTrackbarPos( const char*, const char*, int )
@ -693,33 +686,28 @@ CV_IMPL void cvSetTrackbarMin(const char*, const char*, int)
CV_IMPL void* cvGetWindowHandle( const char* ) CV_IMPL void* cvGetWindowHandle( const char* )
{ {
CV_NO_GUI_ERROR( "cvGetWindowHandle" ); CV_NO_GUI_ERROR( "cvGetWindowHandle" );
return 0;
} }
CV_IMPL const char* cvGetWindowName( void* ) CV_IMPL const char* cvGetWindowName( void* )
{ {
CV_NO_GUI_ERROR( "cvGetWindowName" ); CV_NO_GUI_ERROR( "cvGetWindowName" );
return 0;
} }
CV_IMPL int cvWaitKey( int ) CV_IMPL int cvWaitKey( int )
{ {
CV_NO_GUI_ERROR( "cvWaitKey" ); CV_NO_GUI_ERROR( "cvWaitKey" );
return -1;
} }
CV_IMPL int cvInitSystem( int , char** ) CV_IMPL int cvInitSystem( int , char** )
{ {
CV_NO_GUI_ERROR( "cvInitSystem" ); CV_NO_GUI_ERROR( "cvInitSystem" );
return -1;
} }
CV_IMPL int cvStartWindowThread() CV_IMPL int cvStartWindowThread()
{ {
CV_NO_GUI_ERROR( "cvStartWindowThread" ); CV_NO_GUI_ERROR( "cvStartWindowThread" );
return -1;
} }
//-------- Qt --------- //-------- Qt ---------
@ -742,7 +730,6 @@ CV_IMPL int cvStartLoop(int (*)(int argc, char *argv[]), int , char* argv[])
{ {
(void)argv; (void)argv;
CV_NO_GUI_ERROR("cvStartLoop"); CV_NO_GUI_ERROR("cvStartLoop");
return -1;
} }
CV_IMPL void cvStopLoop() CV_IMPL void cvStopLoop()
@ -763,7 +750,6 @@ CV_IMPL void cvSaveWindowParameters(const char* )
CV_IMPL int cvCreateButton(const char*, void (*)(int, void*), void*, int, int) CV_IMPL int cvCreateButton(const char*, void (*)(int, void*), void*, int, int)
{ {
CV_NO_GUI_ERROR("cvCreateButton"); CV_NO_GUI_ERROR("cvCreateButton");
return -1;
} }

View File

@ -490,7 +490,7 @@ decode_rle8_bad: ;
result = true; result = true;
break; break;
default: default:
CV_ErrorNoReturn(cv::Error::StsError, "Invalid/unsupported mode"); CV_Error(cv::Error::StsError, "Invalid/unsupported mode");
} }
} }
CV_CATCH_ALL CV_CATCH_ALL

View File

@ -409,7 +409,7 @@ bool GdalDecoder::readData( Mat& img ){
color = 3; color = 3;
break; break;
default: default:
CV_ErrorNoReturn(cv::Error::StsError, "Invalid/unsupported mode"); CV_Error(cv::Error::StsError, "Invalid/unsupported mode");
} }
// make sure the image band has the same dimensions as the image // make sure the image band has the same dimensions as the image

View File

@ -77,7 +77,7 @@ static int ReadNumber(RLByteStream& strm, int maxdigits = 0)
else else
{ {
#if 1 #if 1
CV_ErrorNoReturn_(Error::StsError, ("PXM: Unexpected code in ReadNumber(): 0x%x (%d)", code, code)); CV_Error_(Error::StsError, ("PXM: Unexpected code in ReadNumber(): 0x%x (%d)", code, code));
#else #else
code = strm.getByte(); code = strm.getByte();
#endif #endif
@ -354,7 +354,7 @@ bool PxMDecoder::readData( Mat& img )
break; break;
} }
default: default:
CV_ErrorNoReturn(Error::StsError, "m_bpp is not supported"); CV_Error(Error::StsError, "m_bpp is not supported");
} }
} }
CV_CATCH (cv::Exception, e) CV_CATCH (cv::Exception, e)

View File

@ -722,7 +722,7 @@ bool imwrite( const String& filename, InputArray _img,
else if (_img.isMatVector() || _img.isUMatVector()) else if (_img.isMatVector() || _img.isUMatVector())
_img.getMatVector(img_vec); _img.getMatVector(img_vec);
else else
CV_ErrorNoReturn(Error::StsBadArg, "Unknown/unsupported input encountered"); CV_Error(Error::StsBadArg, "Unknown/unsupported input encountered");
CV_Assert(!img_vec.empty()); CV_Assert(!img_vec.empty());
return imwrite_(filename, img_vec, params, false); return imwrite_(filename, img_vec, params, false);

View File

@ -99,7 +99,6 @@ static int rgbe_error(int rgbe_error_code, const char *msg)
CV_Error(cv::Error::StsError, cv::String("RGBE error: \n") + CV_Error(cv::Error::StsError, cv::String("RGBE error: \n") +
cv::String(msg)); cv::String(msg));
} }
return RGBE_RETURN_FAILURE;
} }
/* standard conversion from float pixels to rgbe pixels */ /* standard conversion from float pixels to rgbe pixels */

View File

@ -126,9 +126,8 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
case CV_32SC1: return interp1_<int>(x,Y,xi); break; case CV_32SC1: return interp1_<int>(x,Y,xi); break;
case CV_32FC1: return interp1_<float>(x,Y,xi); break; case CV_32FC1: return interp1_<float>(x,Y,xi); break;
case CV_64FC1: return interp1_<double>(x,Y,xi); break; case CV_64FC1: return interp1_<double>(x,Y,xi); break;
default: CV_Error(Error::StsUnsupportedFormat, ""); break;
} }
return Mat(); CV_Error(Error::StsUnsupportedFormat, "");
} }
namespace colormap namespace colormap

View File

@ -3980,7 +3980,6 @@ namespace cv{
} }
CV_Error(CV_StsUnsupportedFormat, "unsupported label/image type"); CV_Error(CV_StsUnsupportedFormat, "unsupported label/image type");
return -1;
} }
} }
@ -4003,7 +4002,6 @@ int cv::connectedComponents(InputArray img_, OutputArray _labels, int connectivi
} }
else{ else{
CV_Error(CV_StsUnsupportedFormat, "the type of labels must be 16u or 32s"); CV_Error(CV_StsUnsupportedFormat, "the type of labels must be 16u or 32s");
return 0;
} }
} }

View File

@ -3642,8 +3642,6 @@ cv::Ptr<cv::BaseRowFilter> cv::getLinearRowFilter( int srcType, int bufType,
CV_Error_( CV_StsNotImplemented, CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and buffer format (=%d)", ("Unsupported combination of source format (=%d), and buffer format (=%d)",
srcType, bufType)); srcType, bufType));
return Ptr<BaseRowFilter>();
} }
@ -3739,8 +3737,6 @@ cv::Ptr<cv::BaseColumnFilter> cv::getLinearColumnFilter( int bufType, int dstTyp
CV_Error_( CV_StsNotImplemented, CV_Error_( CV_StsNotImplemented,
("Unsupported combination of buffer format (=%d), and destination format (=%d)", ("Unsupported combination of buffer format (=%d), and destination format (=%d)",
bufType, dstType)); bufType, dstType));
return Ptr<BaseColumnFilter>();
} }
@ -4491,8 +4487,6 @@ cv::Ptr<cv::BaseFilter> cv::getLinearFilter(int srcType, int dstType,
CV_Error_( CV_StsNotImplemented, CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and destination format (=%d)", ("Unsupported combination of source format (=%d), and destination format (=%d)",
srcType, dstType)); srcType, dstType));
return Ptr<BaseFilter>();
} }

View File

@ -888,7 +888,6 @@ cv::Ptr<cv::BaseRowFilter> cv::getMorphologyRowFilter(int op, int type, int ksiz
} }
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type)); CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
return Ptr<BaseRowFilter>();
} }
cv::Ptr<cv::BaseColumnFilter> cv::getMorphologyColumnFilter(int op, int type, int ksize, int anchor) cv::Ptr<cv::BaseColumnFilter> cv::getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
@ -935,7 +934,6 @@ cv::Ptr<cv::BaseColumnFilter> cv::getMorphologyColumnFilter(int op, int type, in
} }
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type)); CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
return Ptr<BaseColumnFilter>();
} }
@ -973,7 +971,6 @@ cv::Ptr<cv::BaseFilter> cv::getMorphologyFilter(int op, int type, InputArray _ke
} }
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type)); CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
return Ptr<BaseFilter>();
} }

View File

@ -113,9 +113,7 @@ inline bool isStorageOrMat(void * arr)
return true; return true;
else if (CV_IS_MAT( arr )) else if (CV_IS_MAT( arr ))
return false; return false;
else CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
return false;
} }
#endif /*__OPENCV_CV_INTERNAL_H_*/ #endif /*__OPENCV_CV_INTERNAL_H_*/

View File

@ -1332,8 +1332,6 @@ cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksi
CV_Error_( CV_StsNotImplemented, CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and buffer format (=%d)", ("Unsupported combination of source format (=%d), and buffer format (=%d)",
srcType, sumType)); srcType, sumType));
return Ptr<BaseRowFilter>();
} }
@ -1374,8 +1372,6 @@ cv::Ptr<cv::BaseColumnFilter> cv::getColumnSumFilter(int sumType, int dstType, i
CV_Error_( CV_StsNotImplemented, CV_Error_( CV_StsNotImplemented,
("Unsupported combination of sum format (=%d), and destination format (=%d)", ("Unsupported combination of sum format (=%d), and destination format (=%d)",
sumType, dstType)); sumType, dstType));
return Ptr<BaseColumnFilter>();
} }
@ -1656,8 +1652,6 @@ static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize
CV_Error_( CV_StsNotImplemented, CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and buffer format (=%d)", ("Unsupported combination of source format (=%d), and buffer format (=%d)",
srcType, sumType)); srcType, sumType));
return Ptr<BaseRowFilter>();
} }
} }

View File

@ -613,7 +613,6 @@ static Point2f mapPointSpherical(const Point2f& p, float alpha, Vec4d* J, int pr
return Point2f((float)asin(x1), (float)asin(y1)); return Point2f((float)asin(x1), (float)asin(y1));
} }
CV_Error(CV_StsBadArg, "Unknown projection type"); CV_Error(CV_StsBadArg, "Unknown projection type");
return Point2f();
} }

View File

@ -125,7 +125,6 @@ template <typename T> bool CV_BoundingRectTest::checking_function_work(vector <P
cout << "Result rect (x, y, w, h): [" << rect[i].x << ", " << rect[i].y << ", " << rect[i].width << ", " << rect[i].height << "]" << endl; cout << "Result rect (x, y, w, h): [" << rect[i].x << ", " << rect[i].y << ", " << rect[i].width << ", " << rect[i].height << "]" << endl;
cout << endl; cout << endl;
CV_Error(IMGPROC_BOUNDINGRECT_ERROR_DIFF, MESSAGE_ERROR_DIFF); CV_Error(IMGPROC_BOUNDINGRECT_ERROR_DIFF, MESSAGE_ERROR_DIFF);
return false;
} }
} }

View File

@ -58,7 +58,6 @@ int str_to_svm_type(String& str)
if( !str.compare("NU_SVR") ) if( !str.compare("NU_SVR") )
return SVM::NU_SVR; return SVM::NU_SVR;
CV_Error( CV_StsBadArg, "incorrect svm type string" ); CV_Error( CV_StsBadArg, "incorrect svm type string" );
return -1;
} }
int str_to_svm_kernel_type( String& str ) int str_to_svm_kernel_type( String& str )
{ {
@ -71,7 +70,6 @@ int str_to_svm_kernel_type( String& str )
if( !str.compare("SIGMOID") ) if( !str.compare("SIGMOID") )
return SVM::SIGMOID; return SVM::SIGMOID;
CV_Error( CV_StsBadArg, "incorrect svm type string" ); CV_Error( CV_StsBadArg, "incorrect svm type string" );
return -1;
} }
// 4. em // 4. em
@ -85,7 +83,6 @@ int str_to_ann_train_method( String& str )
if (!str.compare("ANNEAL")) if (!str.compare("ANNEAL"))
return ANN_MLP::ANNEAL; return ANN_MLP::ANNEAL;
CV_Error( CV_StsBadArg, "incorrect ann train method string" ); CV_Error( CV_StsBadArg, "incorrect ann train method string" );
return -1;
} }
#if 0 #if 0
@ -102,7 +99,6 @@ int str_to_ann_activation_function(String& str)
if (!str.compare("LEAKYRELU")) if (!str.compare("LEAKYRELU"))
return ANN_MLP::LEAKYRELU; return ANN_MLP::LEAKYRELU;
CV_Error(CV_StsBadArg, "incorrect ann activation function string"); CV_Error(CV_StsBadArg, "incorrect ann activation function string");
return -1;
} }
#endif #endif
@ -374,7 +370,6 @@ int str_to_boost_type( String& str )
if ( !str.compare("GENTLE") ) if ( !str.compare("GENTLE") )
return Boost::GENTLE; return Boost::GENTLE;
CV_Error( CV_StsBadArg, "incorrect boost type string" ); CV_Error( CV_StsBadArg, "incorrect boost type string" );
return -1;
} }
// 8. rtrees // 8. rtrees
@ -387,7 +382,6 @@ int str_to_svmsgd_type( String& str )
if ( !str.compare("ASGD") ) if ( !str.compare("ASGD") )
return SVMSGD::ASGD; return SVMSGD::ASGD;
CV_Error( CV_StsBadArg, "incorrect svmsgd type string" ); CV_Error( CV_StsBadArg, "incorrect svmsgd type string" );
return -1;
} }
int str_to_margin_type( String& str ) int str_to_margin_type( String& str )
@ -397,7 +391,6 @@ int str_to_margin_type( String& str )
if ( !str.compare("HARD_MARGIN") ) if ( !str.compare("HARD_MARGIN") )
return SVMSGD::HARD_MARGIN; return SVMSGD::HARD_MARGIN;
CV_Error( CV_StsBadArg, "incorrect svmsgd margin type string" ); CV_Error( CV_StsBadArg, "incorrect svmsgd margin type string" );
return -1;
} }
} }

View File

@ -54,7 +54,6 @@ Ptr<ExposureCompensator> ExposureCompensator::createDefault(int type)
if (type == GAIN_BLOCKS) if (type == GAIN_BLOCKS)
return makePtr<BlocksGainCompensator>(); return makePtr<BlocksGainCompensator>();
CV_Error(Error::StsBadArg, "unsupported exposure compensation method"); CV_Error(Error::StsBadArg, "unsupported exposure compensation method");
return Ptr<ExposureCompensator>();
} }

View File

@ -836,7 +836,7 @@ std::string findDataFile(const std::string& relative_path, bool required)
#endif #endif
#endif #endif
if (required) if (required)
CV_ErrorNoReturn(cv::Error::StsError, cv::format("OpenCV tests: Can't find required data file: %s", relative_path.c_str())); CV_Error(cv::Error::StsError, cv::format("OpenCV tests: Can't find required data file: %s", relative_path.c_str()));
throw SkipTestException(cv::format("OpenCV tests: Can't find data file: %s", relative_path.c_str())); throw SkipTestException(cv::format("OpenCV tests: Can't find data file: %s", relative_path.c_str()));
} }

View File

@ -404,7 +404,7 @@ double cv::findTransformECC(InputArray templateImage,
Mat templateFloat = Mat(hs, ws, CV_32F);// to store the (smoothed) template Mat templateFloat = Mat(hs, ws, CV_32F);// to store the (smoothed) template
Mat imageFloat = Mat(hd, wd, CV_32F);// to store the (smoothed) input image Mat imageFloat = Mat(hd, wd, CV_32F);// to store the (smoothed) input image
Mat imageWarped = Mat(hs, ws, CV_32F);// to store the warped zero-mean input image Mat imageWarped = Mat(hs, ws, CV_32F);// to store the warped zero-mean input image
Mat imageMask = Mat(hs, ws, CV_8U); //to store the final mask Mat imageMask = Mat(hs, ws, CV_8U); // to store the final mask
Mat inputMaskMat = inputMask.getMat(); Mat inputMaskMat = inputMask.getMat();
//to use it for mask warping //to use it for mask warping

View File

@ -106,7 +106,8 @@ protected:
bool parseMovi(frame_list& in_frame_list) bool parseMovi(frame_list& in_frame_list)
{ {
//not implemented //not implemented
in_frame_list.empty(); CV_UNUSED(in_frame_list);
// FIXIT: in_frame_list.empty();
return true; return true;
} }
bool parseStrl(char stream_id, Codecs codec_); bool parseStrl(char stream_id, Codecs codec_);

View File

@ -40,6 +40,8 @@
//M*/ //M*/
#include "precomp.hpp" #include "precomp.hpp"
#include <iostream>
using namespace std;
#include "cap_intelperc.hpp" #include "cap_intelperc.hpp"
#include "cap_dshow.hpp" #include "cap_dshow.hpp"
@ -63,7 +65,7 @@
#if defined(__clang__) #if defined(__clang__)
#pragma clang diagnostic ignored "-Wimplicit-fallthrough" #pragma clang diagnostic ignored "-Wimplicit-fallthrough"
#endif #endif
#if defined(__GNUC__) #if defined(__GNUC__) && __GNUC__ >= 7
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" #pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
#endif #endif
@ -200,12 +202,6 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
TRY_OPEN(capture, cvCreateCameraCapture_V4L(index)) TRY_OPEN(capture, cvCreateCameraCapture_V4L(index))
#endif #endif
#ifdef HAVE_GSTREAMER
TRY_OPEN(capture, cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L2, reinterpret_cast<char *>(index)))
TRY_OPEN(capture, cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L, reinterpret_cast<char *>(index)))
#endif
if (pref) break; // CAP_VFW or CAP_V4L or CAP_V4L2 if (pref) break; // CAP_VFW or CAP_V4L or CAP_V4L2
case CAP_FIREWIRE: case CAP_FIREWIRE:
@ -221,11 +217,6 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
TRY_OPEN(capture, cvCreateCameraCapture_CMU(index)) TRY_OPEN(capture, cvCreateCameraCapture_CMU(index))
#endif #endif
#if defined(HAVE_GSTREAMER) && 0
// Re-enable again when gstreamer 1394 support will land in the backend code
TRY_OPEN(capture, cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_1394, 0))
#endif
if (pref) break; // CAP_FIREWIRE if (pref) break; // CAP_FIREWIRE
#ifdef HAVE_MIL #ifdef HAVE_MIL
@ -330,12 +321,6 @@ CV_IMPL CvCapture * cvCreateFileCaptureWithPreference (const char * filename, in
if (apiPreference) break; if (apiPreference) break;
#endif #endif
#ifdef HAVE_GSTREAMER
case CAP_GSTREAMER:
TRY_OPEN(result, cvCreateCapture_GStreamer (CV_CAP_GSTREAMER_FILE, filename))
if (apiPreference) break;
#endif
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) #if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
case CAP_QT: case CAP_QT:
TRY_OPEN(result, cvCreateFileCapture_QT (filename)) TRY_OPEN(result, cvCreateFileCapture_QT (filename))
@ -463,6 +448,9 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
{ {
int domains[] = int domains[] =
{ {
#ifdef HAVE_GSTREAMER
CAP_GSTREAMER,
#endif
#ifdef HAVE_DSHOW #ifdef HAVE_DSHOW
CAP_DSHOW, CAP_DSHOW,
#endif #endif
@ -490,7 +478,8 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
// try every possibly installed camera API // try every possibly installed camera API
for (int i = 0; domains[i] >= 0; i++) for (int i = 0; domains[i] >= 0; i++)
{ {
#if defined(HAVE_DSHOW) || \ #if defined(HAVE_GSTREAMER) || \
defined(HAVE_DSHOW) || \
defined(HAVE_INTELPERC) || \ defined(HAVE_INTELPERC) || \
defined(WINRT_VIDEO) || \ defined(WINRT_VIDEO) || \
defined(HAVE_GPHOTO2) || \ defined(HAVE_GPHOTO2) || \
@ -499,6 +488,11 @@ static Ptr<IVideoCapture> IVideoCapture_create(int index)
switch (domains[i]) switch (domains[i])
{ {
#ifdef HAVE_GSTREAMER
case CAP_GSTREAMER:
capture = createGStreamerCapture(index);
break;
#endif
#ifdef HAVE_DSHOW #ifdef HAVE_DSHOW
case CAP_DSHOW: case CAP_DSHOW:
capture = makePtr<VideoCapture_DShow>(index); capture = makePtr<VideoCapture_DShow>(index);
@ -536,6 +530,14 @@ static Ptr<IVideoCapture> IVideoCapture_create(const String& filename, int apiPr
{ {
bool useAny = (apiPreference == CAP_ANY); bool useAny = (apiPreference == CAP_ANY);
Ptr<IVideoCapture> capture; Ptr<IVideoCapture> capture;
#ifdef HAVE_GSTREAMER
if (useAny || apiPreference == CAP_GSTREAMER)
{
capture = createGStreamerCapture(filename);
if (capture && capture->isOpened())
return capture;
}
#endif
#ifdef HAVE_XINE #ifdef HAVE_XINE
if (useAny || apiPreference == CAP_XINE) if (useAny || apiPreference == CAP_XINE)
{ {

File diff suppressed because it is too large Load Diff

View File

@ -96,7 +96,6 @@ static bool createEncodeHuffmanTable( const int* src, unsigned* table, int max_s
if( size > max_size ) if( size > max_size )
{ {
CV_Error(CV_StsOutOfRange, "too big maximum Huffman code size"); CV_Error(CV_StsOutOfRange, "too big maximum Huffman code size");
return false;
} }
memset( table, 0, size*sizeof(table[0])); memset( table, 0, size*sizeof(table[0]));

View File

@ -139,7 +139,6 @@ CvVideoWriter* cvCreateVideoWriter_Images(const char* filename);
#define CV_CAP_GSTREAMER_V4L2 2 #define CV_CAP_GSTREAMER_V4L2 2
#define CV_CAP_GSTREAMER_FILE 3 #define CV_CAP_GSTREAMER_FILE 3
CvCapture* cvCreateCapture_GStreamer(int type, const char *filename);
CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char* filename); CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char* filename);
@ -194,7 +193,11 @@ namespace cv
Ptr<IVideoCapture> createGPhoto2Capture(int index); Ptr<IVideoCapture> createGPhoto2Capture(int index);
Ptr<IVideoCapture> createGPhoto2Capture(const String& deviceName); Ptr<IVideoCapture> createGPhoto2Capture(const String& deviceName);
Ptr<IVideoCapture> createXINECapture(const char* filename); Ptr<IVideoCapture> createXINECapture(const char* filename);
Ptr<IVideoCapture> createGStreamerCapture(const String& filename);
Ptr<IVideoCapture> createGStreamerCapture(int index);
} }
#endif /* __VIDEOIO_H_ */ #endif /* __VIDEOIO_H_ */

View File

@ -7,6 +7,7 @@
namespace opencv_test namespace opencv_test
{ {
typedef tuple< string, Size, Size, int > Param; typedef tuple< string, Size, Size, int > Param;
typedef testing::TestWithParam< Param > Videoio_Gstreamer_Test; typedef testing::TestWithParam< Param > Videoio_Gstreamer_Test;
@ -19,8 +20,9 @@ TEST_P(Videoio_Gstreamer_Test, test_object_structure)
int count_frames = 10; int count_frames = 10;
std::ostringstream pipeline; std::ostringstream pipeline;
pipeline << "videotestsrc pattern=ball num-buffers=" << count_frames << " ! " << format; pipeline << "videotestsrc pattern=ball num-buffers=" << count_frames << " ! " << format;
pipeline << ", framerate=1/1, width=" << frame_size.width << ", height=" << frame_size.height << " ! appsink"; pipeline << ", width=" << frame_size.width << ", height=" << frame_size.height << " ! appsink";
VideoCapture cap(pipeline.str(), CAP_GSTREAMER); VideoCapture cap;
ASSERT_NO_THROW(cap.open(pipeline.str(), CAP_GSTREAMER));
ASSERT_TRUE(cap.isOpened()); ASSERT_TRUE(cap.isOpened());
Mat buffer, decode_frame, gray_frame, rgb_frame; Mat buffer, decode_frame, gray_frame, rgb_frame;

View File

@ -46,12 +46,62 @@
namespace opencv_test namespace opencv_test
{ {
struct VideoCaptureAPI
{
VideoCaptureAPIs api;
inline const char * toString() const
{
switch (api)
{
case CAP_ANY: return "CAP_ANY";
#ifdef __linux__
case CAP_V4L2: return "CAP_V4L/CAP_V4L2";
#else
case CAP_VFW: return "CAP_VFW";
#endif
case CAP_FIREWIRE: return "CAP_FIREWIRE";
case CAP_QT: return "CAP_QT";
case CAP_UNICAP: return "CAP_UNICAP";
case CAP_DSHOW: return "CAP_DSHOW";
case CAP_PVAPI: return "CAP_PVAPI";
case CAP_OPENNI: return "CAP_OPENNI";
case CAP_OPENNI_ASUS: return "CAP_OPENNI_ASUS";
case CAP_ANDROID: return "CAP_ANDROID";
case CAP_XIAPI: return "CAP_XIAPI";
case CAP_AVFOUNDATION: return "CAP_AVFOUNDATION";
case CAP_GIGANETIX: return "CAP_GIGANETIX";
case CAP_MSMF: return "CAP_MSMF";
case CAP_WINRT: return "CAP_WINRT";
case CAP_INTELPERC: return "CAP_INTELPERC";
case CAP_OPENNI2: return "CAP_OPENNI2";
case CAP_OPENNI2_ASUS: return "CAP_OPENNI2_ASUS";
case CAP_GPHOTO2: return "CAP_GPHOTO2";
case CAP_GSTREAMER: return "CAP_GSTREAMER";
case CAP_FFMPEG: return "CAP_FFMPEG";
case CAP_IMAGES: return "CAP_IMAGES";
case CAP_ARAVIS: return "CAP_ARAVIS";
case CAP_OPENCV_MJPEG: return "CAP_OPENCV_MJPEG";
case CAP_INTEL_MFX: return "CAP_INTEL_MFX";
case CAP_XINE: return "CAP_XINE";
}
return "unknown";
}
VideoCaptureAPI(int api_ = CAP_ANY) : api((VideoCaptureAPIs)api_) {}
operator int() { return api; }
};
inline std::ostream &operator<<(std::ostream &out, const VideoCaptureAPI & api)
{
out << api.toString(); return out;
}
class Videoio_Test_Base class Videoio_Test_Base
{ {
protected: protected:
string ext; string ext;
string video_file; string video_file;
int apiPref; VideoCaptureAPI apiPref;
protected: protected:
Videoio_Test_Base() {} Videoio_Test_Base() {}
virtual ~Videoio_Test_Base() {} virtual ~Videoio_Test_Base() {}
@ -60,14 +110,16 @@ protected:
void checkFrameRead(int idx, VideoCapture & cap) void checkFrameRead(int idx, VideoCapture & cap)
{ {
//int frameID = (int)cap.get(CAP_PROP_POS_FRAMES); //int frameID = (int)cap.get(CAP_PROP_POS_FRAMES);
Mat img; cap >> img; Mat img;
ASSERT_NO_THROW(cap >> img);
//std::cout << "idx=" << idx << " img=" << img.size() << " frameID=" << frameID << std::endl; //std::cout << "idx=" << idx << " img=" << img.size() << " frameID=" << frameID << std::endl;
ASSERT_FALSE(img.empty()) << "idx=" << idx; ASSERT_FALSE(img.empty()) << "idx=" << idx;
checkFrameContent(img, idx); checkFrameContent(img, idx);
} }
void checkFrameSeek(int idx, VideoCapture & cap) void checkFrameSeek(int idx, VideoCapture & cap)
{ {
bool canSeek = cap.set(CAP_PROP_POS_FRAMES, idx); bool canSeek = false;
ASSERT_NO_THROW(canSeek = cap.set(CAP_PROP_POS_FRAMES, idx));
if (!canSeek) if (!canSeek)
{ {
std::cout << "Seek to frame '" << idx << "' is not supported. SKIP." << std::endl; std::cout << "Seek to frame '" << idx << "' is not supported. SKIP." << std::endl;
@ -79,26 +131,15 @@ protected:
public: public:
void doTest() void doTest()
{ {
if (apiPref == CAP_AVFOUNDATION) VideoCapture cap;
{ ASSERT_NO_THROW(cap.open(video_file, apiPref));
// TODO: fix this backend
std::cout << "SKIP test: AVFoundation backend returns invalid frame count" << std::endl;
return;
}
else if (apiPref == CAP_VFW)
{
// TODO: fix this backend
std::cout << "SKIP test: Video for Windows backend not open files" << std::endl;
return;
}
VideoCapture cap(video_file, apiPref);
if (!cap.isOpened()) if (!cap.isOpened())
{ {
std::cout << "SKIP test: backend " << apiPref << " can't open the video: " << video_file << std::endl; std::cout << "SKIP test: backend " << apiPref << " can't open the video: " << video_file << std::endl;
return; return;
} }
int n_frames = (int)cap.get(CAP_PROP_FRAME_COUNT); int n_frames = -1;
EXPECT_NO_THROW(n_frames = (int)cap.get(CAP_PROP_FRAME_COUNT));
if (n_frames > 0) if (n_frames > 0)
{ {
ASSERT_GT(n_frames, 0); ASSERT_GT(n_frames, 0);
@ -124,7 +165,8 @@ public:
checkFrameRead(k, cap); checkFrameRead(k, cap);
} }
} }
bool canSeek = cap.set(CAP_PROP_POS_FRAMES, 0); bool canSeek = false;
EXPECT_NO_THROW(canSeek = cap.set(CAP_PROP_POS_FRAMES, 0));
if (!canSeek) if (!canSeek)
{ {
std::cout << "Seek to frame '0' is not supported. SKIP all 'seek' tests." << std::endl; std::cout << "Seek to frame '0' is not supported. SKIP all 'seek' tests." << std::endl;
@ -134,7 +176,9 @@ public:
if (ext != "wmv" && ext != "h264" && ext != "h265") if (ext != "wmv" && ext != "h264" && ext != "h265")
{ {
SCOPED_TRACE("progressive seek"); SCOPED_TRACE("progressive seek");
ASSERT_TRUE(cap.set(CAP_PROP_POS_FRAMES, 0)); bool res = false;
EXPECT_NO_THROW(res = cap.set(CAP_PROP_POS_FRAMES, 0));
ASSERT_TRUE(res);
for (int k = 0; k < n_frames; k += 20) for (int k = 0; k < n_frames; k += 20)
{ {
checkFrameSeek(k, cap); checkFrameSeek(k, cap);
@ -144,7 +188,9 @@ public:
if (ext != "mpg" && ext != "wmv" && ext != "h264" && ext != "h265") if (ext != "mpg" && ext != "wmv" && ext != "h264" && ext != "h265")
{ {
SCOPED_TRACE("random seek"); SCOPED_TRACE("random seek");
ASSERT_TRUE(cap.set(CAP_PROP_POS_FRAMES, 0)); bool res = false;
EXPECT_NO_THROW(res = cap.set(CAP_PROP_POS_FRAMES, 0));
ASSERT_TRUE(res);
for (int k = 0; k < 10; ++k) for (int k = 0; k < 10; ++k)
{ {
checkFrameSeek(cvtest::TS::ptr()->get_rng().uniform(0, n_frames), cap); checkFrameSeek(cvtest::TS::ptr()->get_rng().uniform(0, n_frames), cap);
@ -154,7 +200,7 @@ public:
}; };
//================================================================================================== //==================================================================================================
typedef tuple<string, int> Backend_Type_Params; typedef tuple<string, VideoCaptureAPI> Backend_Type_Params;
class Videoio_Bunny : public Videoio_Test_Base, public testing::TestWithParam<Backend_Type_Params> class Videoio_Bunny : public Videoio_Test_Base, public testing::TestWithParam<Backend_Type_Params>
{ {
@ -168,37 +214,29 @@ public:
} }
void doFrameCountTest() void doFrameCountTest()
{ {
if (apiPref == CAP_AVFOUNDATION) VideoCapture cap;
{ EXPECT_NO_THROW(cap.open(video_file, apiPref));
// TODO: fix this backend
std::cout << "SKIP test: AVFoundation backend returns invalid frame count" << std::endl;
return;
}
else if (apiPref == CAP_VFW)
{
// TODO: fix this backend
std::cout << "SKIP test: Video for Windows backend not open files" << std::endl;
return;
}
VideoCapture cap(video_file, apiPref);
if (!cap.isOpened()) if (!cap.isOpened())
{ {
std::cout << "SKIP test: backend " << apiPref << " can't open the video: " << video_file << std::endl; std::cout << "SKIP test: backend " << apiPref << " can't open the video: " << video_file << std::endl;
return; return;
} }
EXPECT_EQ(bunny_param.getWidth() , cap.get(CAP_PROP_FRAME_WIDTH)); Size actual;
EXPECT_EQ(bunny_param.getHeight(), cap.get(CAP_PROP_FRAME_HEIGHT)); EXPECT_NO_THROW(actual = Size((int)cap.get(CAP_PROP_FRAME_WIDTH),
(int)cap.get(CAP_PROP_FRAME_HEIGHT)));
EXPECT_EQ(bunny_param.getWidth(), actual.width);
EXPECT_EQ(bunny_param.getHeight(), actual.height);
double fps_prop = cap.get(CAP_PROP_FPS); double fps_prop = 0;
EXPECT_NO_THROW(fps_prop = cap.get(CAP_PROP_FPS));
if (fps_prop > 0) if (fps_prop > 0)
EXPECT_NEAR(fps_prop, bunny_param.getFps(), 1); EXPECT_NEAR(fps_prop, bunny_param.getFps(), 1);
else else
std::cout << "FPS is not available. SKIP check." << std::endl; std::cout << "FPS is not available. SKIP check." << std::endl;
int count_prop = 0; int count_prop = 0;
count_prop = (int)cap.get(CAP_PROP_FRAME_COUNT); EXPECT_NO_THROW(count_prop = (int)cap.get(CAP_PROP_FRAME_COUNT));
// mpg file reports 5.08 sec * 24 fps => property returns 122 frames // mpg file reports 5.08 sec * 24 fps => property returns 122 frames
// but actual number of frames returned is 125 // but actual number of frames returned is 125
if (ext != "mpg") if (ext != "mpg")
@ -213,7 +251,7 @@ public:
while (cap.isOpened()) while (cap.isOpened())
{ {
Mat frame; Mat frame;
cap >> frame; EXPECT_NO_THROW(cap >> frame);
if (frame.empty()) if (frame.empty())
break; break;
EXPECT_EQ(bunny_param.getWidth(), frame.cols); EXPECT_EQ(bunny_param.getWidth(), frame.cols);
@ -229,7 +267,15 @@ public:
} }
}; };
typedef tuple<string, string, float, int> Ext_Fourcc_PSNR; //==================================================================================================
struct Ext_Fourcc_PSNR
{
string ext;
string fourcc;
float PSNR;
VideoCaptureAPI api;
};
typedef tuple<Size, Ext_Fourcc_PSNR> Size_Ext_Fourcc_PSNR; typedef tuple<Size, Ext_Fourcc_PSNR> Size_Ext_Fourcc_PSNR;
class Videoio_Synthetic : public Videoio_Test_Base, public testing::TestWithParam<Size_Ext_Fourcc_PSNR> class Videoio_Synthetic : public Videoio_Test_Base, public testing::TestWithParam<Size_Ext_Fourcc_PSNR>
@ -243,39 +289,27 @@ public:
Videoio_Synthetic() Videoio_Synthetic()
{ {
frame_size = get<0>(GetParam()); frame_size = get<0>(GetParam());
const Ext_Fourcc_PSNR &param = get<1>(GetParam()); const Ext_Fourcc_PSNR p = get<1>(GetParam());
ext = get<0>(param); ext = p.ext;
fourcc = fourccFromString(get<1>(param)); fourcc = fourccFromString(p.fourcc);
PSNR_GT = get<2>(param); PSNR_GT = p.PSNR;
video_file = cv::tempfile((fourccToString(fourcc) + "." + ext).c_str()); video_file = cv::tempfile((fourccToString(fourcc) + "." + ext).c_str());
frame_count = 100; frame_count = 100;
fps = 25.; fps = 25.;
apiPref = get<3>(param); apiPref = p.api;
} }
void SetUp() void SetUp()
{ {
if (apiPref == CAP_AVFOUNDATION)
{
// TODO: fix this backend
std::cout << "SKIP test: AVFoundation backend can not write video" << std::endl;
return;
}
else if (apiPref == CAP_VFW)
{
// TODO: fix this backend
std::cout << "SKIP test: Video for Windows backend not open files" << std::endl;
return;
}
Mat img(frame_size, CV_8UC3); Mat img(frame_size, CV_8UC3);
VideoWriter writer(video_file, apiPref, fourcc, fps, frame_size, true); VideoWriter writer;
EXPECT_NO_THROW(writer.open(video_file, apiPref, fourcc, fps, frame_size, true));
ASSERT_TRUE(writer.isOpened()); ASSERT_TRUE(writer.isOpened());
for(int i = 0; i < frame_count; ++i ) for(int i = 0; i < frame_count; ++i )
{ {
generateFrame(i, frame_count, img); generateFrame(i, frame_count, img);
writer << img; EXPECT_NO_THROW(writer << img);
} }
writer.release(); EXPECT_NO_THROW(writer.release());
} }
void TearDown() void TearDown()
{ {
@ -301,6 +335,10 @@ public:
if (fourcc == VideoWriter::fourcc('M', 'P', 'E', 'G') && ext == "mkv") if (fourcc == VideoWriter::fourcc('M', 'P', 'E', 'G') && ext == "mkv")
expected_frame_count.end += 1; expected_frame_count.end += 1;
// Workaround for some gstreamer pipelines
if (apiPref == CAP_GSTREAMER)
expected_frame_count.start -= 1;
ASSERT_LE(expected_frame_count.start, actual); ASSERT_LE(expected_frame_count.start, actual);
ASSERT_GE(expected_frame_count.end, actual); ASSERT_GE(expected_frame_count.end, actual);
@ -310,22 +348,24 @@ public:
//================================================================================================== //==================================================================================================
int backend_params[] = { static VideoCaptureAPI backend_params[] = {
#ifdef HAVE_QUICKTIME #ifdef HAVE_QUICKTIME
CAP_QT, CAP_QT,
#endif #endif
#ifdef HAVE_AVFOUNDATION // TODO: Broken?
CAP_AVFOUNDATION, //#ifdef HAVE_AVFOUNDATION
#endif // CAP_AVFOUNDATION,
//#endif
#ifdef HAVE_MSMF #ifdef HAVE_MSMF
CAP_MSMF, CAP_MSMF,
#endif #endif
#ifdef HAVE_VFW // TODO: Broken?
CAP_VFW, //#ifdef HAVE_VFW
#endif // CAP_VFW,
//#endif
#ifdef HAVE_GSTREAMER #ifdef HAVE_GSTREAMER
CAP_GSTREAMER, CAP_GSTREAMER,
@ -343,7 +383,7 @@ int backend_params[] = {
// CAP_INTEL_MFX // CAP_INTEL_MFX
}; };
string bunny_params[] = { static string bunny_params[] = {
#ifdef HAVE_VIDEO_INPUT #ifdef HAVE_VIDEO_INPUT
string("wmv"), string("wmv"),
string("mov"), string("mov"),
@ -368,12 +408,22 @@ INSTANTIATE_TEST_CASE_P(videoio, Videoio_Bunny,
//================================================================================================== //==================================================================================================
inline Ext_Fourcc_PSNR makeParam(const char * ext, const char * fourcc, float psnr, int apipref) inline Ext_Fourcc_PSNR makeParam(const char * ext, const char * fourcc, float psnr, VideoCaptureAPIs apipref)
{ {
return make_tuple(string(ext), string(fourcc), (float)psnr, (int)apipref); Ext_Fourcc_PSNR res;
res.ext = ext;
res.fourcc = fourcc;
res.PSNR = psnr;
res.api = apipref;
return res;
} }
Ext_Fourcc_PSNR synthetic_params[] = { inline static std::ostream &operator<<(std::ostream &out, const Ext_Fourcc_PSNR &p)
{
out << "FOURCC(" << p.fourcc << "), ." << p.ext << ", " << p.api << ", " << p.PSNR << "dB"; return out;
}
static Ext_Fourcc_PSNR synthetic_params[] = {
#ifdef HAVE_MSMF #ifdef HAVE_MSMF
#if !defined(_M_ARM) #if !defined(_M_ARM)
@ -385,16 +435,17 @@ Ext_Fourcc_PSNR synthetic_params[] = {
makeParam("mov", "H264", 30.f, CAP_MSMF), makeParam("mov", "H264", 30.f, CAP_MSMF),
#endif #endif
#ifdef HAVE_VFW // TODO: Broken?
#if !defined(_M_ARM) //#ifdef HAVE_VFW
makeParam("wmv", "WMV1", 30.f, CAP_VFW), //#if !defined(_M_ARM)
makeParam("wmv", "WMV2", 30.f, CAP_VFW), // makeParam("wmv", "WMV1", 30.f, CAP_VFW),
#endif // makeParam("wmv", "WMV2", 30.f, CAP_VFW),
makeParam("wmv", "WMV3", 30.f, CAP_VFW), //#endif
makeParam("wmv", "WVC1", 30.f, CAP_VFW), // makeParam("wmv", "WMV3", 30.f, CAP_VFW),
makeParam("avi", "H264", 30.f, CAP_VFW), // makeParam("wmv", "WVC1", 30.f, CAP_VFW),
makeParam("avi", "MJPG", 30.f, CAP_VFW), // makeParam("avi", "H264", 30.f, CAP_VFW),
#endif // makeParam("avi", "MJPG", 30.f, CAP_VFW),
//#endif
#ifdef HAVE_QUICKTIME #ifdef HAVE_QUICKTIME
makeParam("mov", "mp4v", 30.f, CAP_QT), makeParam("mov", "mp4v", 30.f, CAP_QT),
@ -408,17 +459,18 @@ Ext_Fourcc_PSNR synthetic_params[] = {
makeParam("mkv", "MJPG", 30.f, CAP_QT), makeParam("mkv", "MJPG", 30.f, CAP_QT),
#endif #endif
#ifdef HAVE_AVFOUNDATION // TODO: Broken?
makeParam("mov", "mp4v", 30.f, CAP_AVFOUNDATION), //#ifdef HAVE_AVFOUNDATION
makeParam("avi", "XVID", 30.f, CAP_AVFOUNDATION), // makeParam("mov", "mp4v", 30.f, CAP_AVFOUNDATION),
makeParam("avi", "MPEG", 30.f, CAP_AVFOUNDATION), // makeParam("avi", "XVID", 30.f, CAP_AVFOUNDATION),
makeParam("avi", "IYUV", 30.f, CAP_AVFOUNDATION), // makeParam("avi", "MPEG", 30.f, CAP_AVFOUNDATION),
makeParam("avi", "MJPG", 30.f, CAP_AVFOUNDATION), // makeParam("avi", "IYUV", 30.f, CAP_AVFOUNDATION),
// makeParam("avi", "MJPG", 30.f, CAP_AVFOUNDATION),
makeParam("mkv", "XVID", 30.f, CAP_AVFOUNDATION), // makeParam("mkv", "XVID", 30.f, CAP_AVFOUNDATION),
makeParam("mkv", "MPEG", 30.f, CAP_AVFOUNDATION), // makeParam("mkv", "MPEG", 30.f, CAP_AVFOUNDATION),
makeParam("mkv", "MJPG", 30.f, CAP_AVFOUNDATION), // makeParam("mkv", "MJPG", 30.f, CAP_AVFOUNDATION),
#endif //#endif
#ifdef HAVE_FFMPEG #ifdef HAVE_FFMPEG
makeParam("avi", "XVID", 30.f, CAP_FFMPEG), makeParam("avi", "XVID", 30.f, CAP_FFMPEG),
@ -432,15 +484,13 @@ Ext_Fourcc_PSNR synthetic_params[] = {
#endif #endif
#ifdef HAVE_GSTREAMER #ifdef HAVE_GSTREAMER
// makeParam("avi", "XVID", 30.f, CAP_GSTREAMER), - corrupted frames, broken indexes
makeParam("avi", "MPEG", 30.f, CAP_GSTREAMER), makeParam("avi", "MPEG", 30.f, CAP_GSTREAMER),
makeParam("avi", "IYUV", 30.f, CAP_GSTREAMER),
makeParam("avi", "MJPG", 30.f, CAP_GSTREAMER), makeParam("avi", "MJPG", 30.f, CAP_GSTREAMER),
makeParam("avi", "H264", 30.f, CAP_GSTREAMER), makeParam("avi", "H264", 30.f, CAP_GSTREAMER),
// makeParam("mkv", "XVID", 30.f, CAP_GSTREAMER),
makeParam("mkv", "MPEG", 30.f, CAP_GSTREAMER), makeParam("mkv", "MPEG", 30.f, CAP_GSTREAMER),
makeParam("mkv", "MJPG", 30.f, CAP_GSTREAMER), makeParam("mkv", "MJPG", 30.f, CAP_GSTREAMER),
makeParam("mkv", "H264", 30.f, CAP_GSTREAMER),
#endif #endif
makeParam("avi", "MJPG", 30.f, CAP_OPENCV_MJPEG), makeParam("avi", "MJPG", 30.f, CAP_OPENCV_MJPEG),

View File

@ -8,144 +8,430 @@
using namespace std; using namespace std;
using namespace cv; using namespace cv;
string getGstDemuxPlugin(string container); class GStreamerPipeline
string getGstAvDecodePlugin(string codec);
int main(int argc, char *argv[])
{ {
const string keys = public:
"{h help usage ? | | print help messages }" // Preprocessing arguments command line
"{p pipeline |gst-default| pipeline name (supported: 'gst-default', 'gst-vaapi', 'gst-libav', 'ffmpeg') }" GStreamerPipeline(int argc, char *argv[])
"{ct container |mp4 | container name (supported: 'mp4', 'mov', 'avi', 'mkv') }"
"{cd codec |h264 | codec name (supported: 'h264', 'h265', 'mpeg2', 'mpeg4', 'mjpeg', 'vp8') }"
"{f file path | | path to file }"
"{fm fast | | fast measure fps }";
CommandLineParser parser(argc, argv, keys);
parser.about("This program shows how to read a video file with GStreamer pipeline with OpenCV.");
if (parser.has("help"))
{ {
parser.printMessage(); const string keys =
return 0; "{h help usage ? | | print help messages }"
"{m mode | | coding mode (supported: encode, decode) }"
"{p pipeline |default | pipeline name (supported: 'default', 'gst-basic', 'gst-vaapi', 'gst-libav', 'ffmpeg') }"
"{ct container |mp4 | container name (supported: 'mp4', 'mov', 'avi', 'mkv') }"
"{cd codec |h264 | codec name (supported: 'h264', 'h265', 'mpeg2', 'mpeg4', 'mjpeg', 'vp8') }"
"{f file path | | path to file }"
"{vr resolution |720p | video resolution for encoding (supported: '720p', '1080p', '4k') }"
"{fps |30 | fix frame per second for encoding (supported: fps > 0) }"
"{fm fast | | fast measure fps }";
cmd_parser = new CommandLineParser(argc, argv, keys);
cmd_parser->about("This program shows how to read a video file with GStreamer pipeline with OpenCV.");
if (cmd_parser->has("help"))
{
cmd_parser->printMessage();
exit_code = -1;
}
fast_measure = cmd_parser->has("fast"); // fast measure fps
fix_fps = cmd_parser->get<int>("fps"); // fixed frame per second
pipeline = cmd_parser->get<string>("pipeline"), // gstreamer pipeline type
container = cmd_parser->get<string>("container"), // container type
mode = cmd_parser->get<string>("mode"), // coding mode
codec = cmd_parser->get<string>("codec"), // codec type
file_name = cmd_parser->get<string>("file"), // path to videofile
resolution = cmd_parser->get<string>("resolution"); // video resolution
if (!cmd_parser->check())
{
cmd_parser->printErrors();
exit_code = -1;
}
exit_code = 0;
} }
bool arg_fast_measure = parser.has("fast"); // fast measure fps ~GStreamerPipeline() { delete cmd_parser; }
string arg_pipeline = parser.get<string>("pipeline"), // GStreamer pipeline type
arg_container = parser.get<string>("container"), // container type
arg_codec = parser.get<string>("codec"), // codec type
arg_file_name = parser.get<string>("file"); // path to videofile
VideoCapture cap;
if (!parser.check()) // Start pipeline
int run()
{ {
parser.printErrors(); if (exit_code < 0) { return exit_code; }
return 0; if (mode == "decode") { if (createDecodePipeline() < 0) return -1; }
} else if (mode == "encode") { if (createEncodePipeline() < 0) return -1; }
else
// Choose the constructed GStreamer pipeline
if (arg_pipeline.find("gst") == 0)
{
ostringstream pipeline;
pipeline << "filesrc location=\"" << arg_file_name << "\"";
pipeline << " ! " << getGstDemuxPlugin(arg_container);
if (arg_pipeline.find("default") == 4) {
pipeline << " ! decodebin";
}
else if (arg_pipeline.find("vaapi1710") == 4)
{ {
pipeline << " ! vaapidecodebin"; cout << "Unsupported mode: " << mode << endl;
if (arg_container == "mkv") cmd_parser->printErrors();
{ return -1;
pipeline << " ! autovideoconvert";
}
else
{
pipeline << " ! video/x-raw, format=YV12";
}
} }
else if (arg_pipeline.find("libav") == 4) cout << "_____________________________________" << endl;
cout << "Pipeline " << mode << ":" << endl;
cout << stream_pipeline.str() << endl;
// Choose a show video or only measure fps
cout << "_____________________________________" << endl;
cout << "Start measure frame per seconds (fps)" << endl;
cout << "Loading ..." << endl;
vector<double> tick_counts;
cout << "Start " << mode << ": " << file_name;
cout << " (" << pipeline << ")" << endl;
while(true)
{ {
pipeline << " ! " << getGstAvDecodePlugin(arg_codec); int64 temp_count_tick = 0;
if (mode == "decode")
{
Mat frame;
temp_count_tick = getTickCount();
cap >> frame;
temp_count_tick = getTickCount() - temp_count_tick;
if (frame.empty()) { break; }
}
else if (mode == "encode")
{
Mat element;
while(!cap.grab());
cap.retrieve(element);
temp_count_tick = getTickCount();
wrt << element;
temp_count_tick = getTickCount() - temp_count_tick;
}
tick_counts.push_back(static_cast<double>(temp_count_tick));
if (((mode == "decode") && fast_measure && (tick_counts.size() > 1e3)) ||
((mode == "encode") && (tick_counts.size() > 3e3)) ||
((mode == "encode") && fast_measure && (tick_counts.size() > 1e2)))
{ break; }
}
double time_fps = sum(tick_counts)[0] / getTickFrequency();
if (tick_counts.size() != 0)
{
cout << "Finished: " << tick_counts.size() << " in " << time_fps <<" sec ~ " ;
cout << tick_counts.size() / time_fps <<" fps " << endl;
} }
else else
{ {
parser.printMessage(); cout << "Failed " << mode << ": " << file_name;
cout << "Unsupported pipeline: " << arg_pipeline << endl; cout << " (" << pipeline << ")" << endl;
return -4; return -1;
}
return 0;
}
// Free video resource
void close()
{
cap.release();
wrt.release();
}
private:
// Choose the constructed GStreamer pipeline for decode
int createDecodePipeline()
{
if (pipeline == "default") {
cap = VideoCapture(file_name, CAP_GSTREAMER);
}
else if (pipeline.find("gst") == 0)
{
stream_pipeline << "filesrc location=\"" << file_name << "\"";
stream_pipeline << " ! " << getGstMuxPlugin();
if (pipeline.find("basic") == 4)
{
stream_pipeline << getGstDefaultCodePlugin();
}
else if (pipeline.find("vaapi1710") == 4)
{
stream_pipeline << getGstVaapiCodePlugin();
}
else if (pipeline.find("libav") == 4)
{
stream_pipeline << getGstAvCodePlugin();
}
else
{
cout << "Unsupported pipeline: " << pipeline << endl;
cmd_parser->printErrors();
return -1;
}
stream_pipeline << " ! videoconvert n-threads=" << getNumThreads();
stream_pipeline << " ! appsink sync=false";
cap = VideoCapture(stream_pipeline.str(), CAP_GSTREAMER);
}
else if (pipeline == "ffmpeg")
{
cap = VideoCapture(file_name, CAP_FFMPEG);
stream_pipeline << "default pipeline for ffmpeg" << endl;
}
else
{
cout << "Unsupported pipeline: " << pipeline << endl;
cmd_parser->printErrors();
return -1;
}
return 0;
}
// Choose the constructed GStreamer pipeline for encode
int createEncodePipeline()
{
if (checkConfiguration() < 0) return -1;
ostringstream test_pipeline;
test_pipeline << "videotestsrc pattern=smpte";
test_pipeline << " ! video/x-raw, " << getVideoSettings();
test_pipeline << " ! appsink sync=false";
cap = VideoCapture(test_pipeline.str(), CAP_GSTREAMER);
if (pipeline == "default") {
wrt = VideoWriter(file_name, CAP_GSTREAMER, getFourccCode(), fix_fps, fix_size, true);
}
else if (pipeline.find("gst") == 0)
{
stream_pipeline << "appsrc ! videoconvert n-threads=" << getNumThreads() << " ! ";
if (pipeline.find("basic") == 4)
{
stream_pipeline << getGstDefaultCodePlugin();
}
else if (pipeline.find("vaapi1710") == 4)
{
stream_pipeline << getGstVaapiCodePlugin();
}
else if (pipeline.find("libav") == 4)
{
stream_pipeline << getGstAvCodePlugin();
}
else
{
cout << "Unsupported pipeline: " << pipeline << endl;
cmd_parser->printErrors();
return -1;
}
stream_pipeline << " ! " << getGstMuxPlugin();
stream_pipeline << " ! filesink location=\"" << file_name << "\"";
wrt = VideoWriter(stream_pipeline.str(), CAP_GSTREAMER, 0, fix_fps, fix_size, true);
}
else if (pipeline == "ffmpeg")
{
wrt = VideoWriter(file_name, CAP_FFMPEG, getFourccCode(), fix_fps, fix_size, true);
stream_pipeline << "default pipeline for ffmpeg" << endl;
}
else
{
cout << "Unsupported pipeline: " << pipeline << endl;
cmd_parser->printErrors();
return -1;
}
return 0;
}
// Choose video resolution for encoding
string getVideoSettings()
{
ostringstream video_size;
if (fix_fps > 0) { video_size << "framerate=" << fix_fps << "/1, "; }
else
{
cout << "Unsupported fps (< 0): " << fix_fps << endl;
cmd_parser->printErrors();
return string();
} }
pipeline << " ! videoconvert"; if (resolution == "720p") { fix_size = Size(1280, 720); }
pipeline << " n-threads=" << getNumThreads(); else if (resolution == "1080p") { fix_size = Size(1920, 1080); }
pipeline << " ! appsink sync=false"; else if (resolution == "4k") { fix_size = Size(3840, 2160); }
cap = VideoCapture(pipeline.str(), CAP_GSTREAMER); else
{
cout << "Unsupported video resolution: " << resolution << endl;
cmd_parser->printErrors();
return string();
}
video_size << "width=" << fix_size.width << ", height=" << fix_size.height;
return video_size.str();
} }
else if (arg_pipeline == "ffmpeg")
// Choose a video container
string getGstMuxPlugin()
{ {
cap = VideoCapture(arg_file_name, CAP_FFMPEG); ostringstream plugin;
if (container == "avi") { plugin << "avi"; }
else if (container == "mp4") { plugin << "qt"; }
else if (container == "mov") { plugin << "qt"; }
else if (container == "mkv") { plugin << "matroska"; }
else
{
cout << "Unsupported container: " << container << endl;
cmd_parser->printErrors();
return string();
}
if (mode == "decode") { plugin << "demux"; }
else if (mode == "encode") { plugin << "mux"; }
else
{
cout << "Unsupported mode: " << mode << endl;
cmd_parser->printErrors();
return string();
}
return plugin.str();
} }
else
// Choose a libav codec
string getGstAvCodePlugin()
{ {
parser.printMessage(); ostringstream plugin;
cout << "Unsupported pipeline: " << arg_pipeline << endl; if (mode == "decode")
return -4; {
if (codec == "h264") { plugin << "h264parse ! "; }
else if (codec == "h265") { plugin << "h265parse ! "; }
plugin << "avdec_";
}
else if (mode == "encode") { plugin << "avenc_"; }
else
{
cout << "Unsupported mode: " << mode << endl;
cmd_parser->printErrors();
return string();
}
if (codec == "h264") { plugin << "h264"; }
else if (codec == "h265") { plugin << "h265"; }
else if (codec == "mpeg2") { plugin << "mpeg2video"; }
else if (codec == "mpeg4") { plugin << "mpeg4"; }
else if (codec == "mjpeg") { plugin << "mjpeg"; }
else if (codec == "vp8") { plugin << "vp8"; }
else
{
cout << "Unsupported libav codec: " << codec << endl;
cmd_parser->printErrors();
return string();
}
return plugin.str();
} }
// Choose a show video or only measure fps // Choose a vaapi codec
cout << "_____________________________________" << '\n'; string getGstVaapiCodePlugin()
cout << "Start measure frame per seconds (fps)" << '\n';
cout << "Loading ..." << '\n';
Mat frame;
vector<double> tick_counts;
cout << "Start decoding: " << arg_file_name;
cout << " (" << arg_pipeline << ")" << endl;
while(true)
{ {
int64 temp_count_tick = getTickCount(); ostringstream plugin;
cap >> frame; if (mode == "decode")
temp_count_tick = getTickCount() - temp_count_tick; {
if (frame.empty()) { break; } plugin << "vaapidecodebin";
tick_counts.push_back(static_cast<double>(temp_count_tick)); if (container == "mkv") { plugin << " ! autovideoconvert"; }
if (arg_fast_measure && (tick_counts.size() > 1000)) { break; } else { plugin << " ! video/x-raw, format=YV12"; }
}
else if (mode == "encode")
{
if (codec == "h264") { plugin << "vaapih264enc"; }
else if (codec == "h265") { plugin << "vaapih265enc"; }
else if (codec == "mpeg2") { plugin << "vaapimpeg2enc"; }
else if (codec == "mjpeg") { plugin << "vaapijpegenc"; }
else if (codec == "vp8") { plugin << "vaapivp8enc"; }
else
{
cout << "Unsupported vaapi codec: " << codec << endl;
cmd_parser->printErrors();
return string();
}
}
else
{
cout << "Unsupported mode: " << resolution << endl;
cmd_parser->printErrors();
return string();
}
return plugin.str();
}
// Choose a default codec
string getGstDefaultCodePlugin()
{
ostringstream plugin;
if (mode == "decode")
{
plugin << " ! decodebin";
}
else if (mode == "encode")
{
if (codec == "h264") { plugin << "x264enc"; }
else if (codec == "h265") { plugin << "x265enc"; }
else if (codec == "mpeg2") { plugin << "mpeg2enc"; }
else if (codec == "mjpeg") { plugin << "jpegenc"; }
else if (codec == "vp8") { plugin << "vp8enc"; }
else
{
cout << "Unsupported default codec: " << codec << endl;
cmd_parser->printErrors();
return string();
}
}
else
{
cout << "Unsupported mode: " << resolution << endl;
cmd_parser->printErrors();
return string();
}
return plugin.str();
}
// Get fourcc for codec
int getFourccCode()
{
if (codec == "h264") { return VideoWriter::fourcc('H','2','6','4'); }
else if (codec == "h265") { return VideoWriter::fourcc('H','E','V','C'); }
else if (codec == "mpeg2") { return VideoWriter::fourcc('M','P','E','G'); }
else if (codec == "mpeg4") { return VideoWriter::fourcc('M','P','4','2'); }
else if (codec == "mjpeg") { return VideoWriter::fourcc('M','J','P','G'); }
else if (codec == "vp8") { return VideoWriter::fourcc('V','P','8','0'); }
else
{
cout << "Unsupported ffmpeg codec: " << codec << endl;
cmd_parser->printErrors();
return 0;
}
} }
double time_fps = sum(tick_counts)[0] / getTickFrequency();
if (tick_counts.size() != 0) // Check bad configuration
int checkConfiguration()
{ {
cout << "Finished: " << tick_counts.size() << " in " << time_fps <<" sec ~ " ; if ((codec == "mpeg2" && getGstMuxPlugin() == "qtmux") ||
cout << tick_counts.size() / time_fps <<" fps " << endl; (codec == "h265" && getGstMuxPlugin() == "avimux") ||
(pipeline == "gst-libav" && (codec == "h264" || codec == "h265")) ||
(pipeline == "gst-vaapi1710" && codec=="mpeg2" && resolution=="4k") ||
(pipeline == "gst-vaapi1710" && codec=="mpeg2" && resolution=="1080p" && fix_fps > 30))
{
cout << "Unsupported configuration" << endl;
cmd_parser->printErrors();
return -1;
}
return 0;
} }
else
{ bool fast_measure; // fast measure fps
cout << "Failed decoding: " << arg_file_name; string pipeline, // gstreamer pipeline type
cout << " (" << arg_pipeline << ")" << endl; container, // container type
return -5; mode, // coding mode
} codec, // codec type
return 0; file_name, // path to videofile
} resolution; // video resolution
int fix_fps; // fixed frame per second
// Choose a video container Size fix_size; // fixed frame size
string getGstDemuxPlugin(string container) { int exit_code;
if (container == "avi") { return "avidemux"; } VideoWriter wrt;
else if (container == "mp4") { return "qtdemux"; } VideoCapture cap;
else if (container == "mov") { return "qtdemux"; } ostringstream stream_pipeline;
else if (container == "mkv") { return "matroskademux"; } CommandLineParser* cmd_parser;
return string(); };
}
int main(int argc, char *argv[])
// Choose a codec {
string getGstAvDecodePlugin(string codec) { GStreamerPipeline pipe(argc, argv);
if (codec == "h264") { return "h264parse ! avdec_h264"; } return pipe.run();
else if (codec == "h265") { return "h265parse ! avdec_h265"; }
else if (codec == "mpeg2") { return "avdec_mpeg2video"; }
else if (codec == "mpeg4") { return "avdec_mpeg4"; }
else if (codec == "mjpeg") { return "avdec_mjpeg"; }
else if (codec == "vp8") { return "avdec_vp8"; }
return string();
} }

View File

@ -0,0 +1,232 @@
#include <opencv2/dnn.hpp>
//! [A custom layer interface]
class MyLayer : public cv::dnn::Layer
{
public:
    //! [MyLayer::MyLayer]
    // Constructor: receives the layer's parameters as parsed from the model file.
    MyLayer(const cv::dnn::LayerParams &params);
    //! [MyLayer::MyLayer]

    //! [MyLayer::create]
    // Factory referenced by the CV_DNN_REGISTER_LAYER_CLASS registration macro.
    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params);
    //! [MyLayer::create]

    //! [MyLayer::getMemoryShapes]
    // Reports output (and internal buffer) shapes for the given input shapes.
    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int requiredOutputs,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &internals) const;
    //! [MyLayer::getMemoryShapes]

    //! [MyLayer::forward]
    // Computes the layer's outputs from its inputs.
    virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals);
    //! [MyLayer::forward]

    //! [MyLayer::finalize]
    // Called once after all shapes are known; a hook for one-time initialization.
    virtual void finalize(const std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs);
    //! [MyLayer::finalize]

    // InputArray-based overload of forward (left empty in this sample).
    virtual void forward(cv::InputArrayOfArrays inputs, cv::OutputArrayOfArrays outputs, cv::OutputArrayOfArrays internals);
};
//! [A custom layer interface]
//! [InterpLayer]
class InterpLayer : public cv::dnn::Layer
{
public:
    // Target spatial size is read from the layer parameters ("width"/"height",
    // defaulting to 0 when absent).
    InterpLayer(const cv::dnn::LayerParams &params) : Layer(params)
    {
        outWidth = params.get<int>("width", 0);
        outHeight = params.get<int>("height", 0);
    }

    // Factory used by CV_DNN_REGISTER_LAYER_CLASS.
    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
    {
        return cv::Ptr<cv::dnn::Layer>(new InterpLayer(params));
    }

    // Output keeps the input's batch and channel dimensions; the spatial size
    // is the fixed (outHeight, outWidth) captured in the constructor.
    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int requiredOutputs,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &internals) const
    {
        CV_UNUSED(requiredOutputs); CV_UNUSED(internals);
        std::vector<int> outShape(4);
        outShape[0] = inputs[0][0];  // batch size
        outShape[1] = inputs[0][1];  // number of channels
        outShape[2] = outHeight;
        outShape[3] = outWidth;
        outputs.assign(1, outShape);
        return false;
    }

    // Implementation of this custom layer is based on https://github.com/cdmh/deeplab-public/blob/master/src/caffe/layers/interp_layer.cpp
    // Bilinear resize with "align corners" coordinate mapping:
    // src = dst * (inSize - 1) / (outSize - 1), so image corners map exactly.
    virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals)
    {
        CV_UNUSED(internals);
        cv::Mat& inp = *inputs[0];
        cv::Mat& out = outputs[0];
        // NOTE(review): assumes float (CV_32F), densely packed NCHW blobs —
        // confirm against the dnn module's blob layout.
        const float* inpData = (float*)inp.data;
        float* outData = (float*)out.data;

        const int batchSize = inp.size[0];
        const int numChannels = inp.size[1];
        const int inpHeight = inp.size[2];
        const int inpWidth = inp.size[3];

        // Per-axis scale; a 1-pixel output axis degenerates to the first row/column.
        const float rheight = (outHeight > 1) ? static_cast<float>(inpHeight - 1) / (outHeight - 1) : 0.f;
        const float rwidth = (outWidth > 1) ? static_cast<float>(inpWidth - 1) / (outWidth - 1) : 0.f;
        for (int h2 = 0; h2 < outHeight; ++h2)
        {
            const float h1r = rheight * h2;
            const int h1 = static_cast<int>(h1r);
            const int h1p = (h1 < inpHeight - 1) ? 1 : 0;  // step to next row, clamped at the border
            const float h1lambda = h1r - h1;               // weight of the lower source row
            const float h0lambda = 1.f - h1lambda;
            for (int w2 = 0; w2 < outWidth; ++w2)
            {
                const float w1r = rwidth * w2;
                const int w1 = static_cast<int>(w1r);
                const int w1p = (w1 < inpWidth - 1) ? 1 : 0;  // step to next column, clamped at the border
                const float w1lambda = w1r - w1;
                const float w0lambda = 1.f - w1lambda;
                // pos1/pos2 are advanced plane-by-plane so one pass covers
                // every (batch, channel) slice at this spatial position.
                const float* pos1 = inpData + h1 * inpWidth + w1;
                float* pos2 = outData + h2 * outWidth + w2;
                for (int c = 0; c < batchSize * numChannels; ++c)
                {
                    pos2[0] =
                      h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) +
                      h1lambda * (w0lambda * pos1[h1p * inpWidth] + w1lambda * pos1[h1p * inpWidth + w1p]);
                    pos1 += inpWidth * inpHeight;
                    pos2 += outWidth * outHeight;
                }
            }
        }
    }

    // InputArray-based overload is unused in this sample.
    virtual void forward(cv::InputArrayOfArrays, cv::OutputArrayOfArrays, cv::OutputArrayOfArrays) {}

private:
    int outWidth, outHeight;
};
//! [InterpLayer]
//! [ResizeBilinearLayer]
class ResizeBilinearLayer : public cv::dnn::Layer
{
public:
    // Target spatial size is stored in the layer's single CV_32SC1 blob:
    // blobs[0] = [outHeight, outWidth]. align_corners mode is not implemented.
    ResizeBilinearLayer(const cv::dnn::LayerParams &params) : Layer(params)
    {
        CV_Assert(!params.get<bool>("align_corners", false));
        // Two separate assertions: a comma inside a single-expression CV_Assert
        // is parsed as the comma operator, silently discarding the first check.
        CV_Assert(blobs.size() == 1);
        CV_Assert(blobs[0].type() == CV_32SC1);

        outHeight = blobs[0].at<int>(0, 0);
        outWidth = blobs[0].at<int>(0, 1);
    }

    // Factory used by CV_DNN_REGISTER_LAYER_CLASS.
    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
    {
        return cv::Ptr<cv::dnn::Layer>(new ResizeBilinearLayer(params));
    }

    // Output keeps the input's batch and channel dimensions; the spatial size
    // is the fixed (outHeight, outWidth) read from blobs[0] in the constructor.
    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int requiredOutputs,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &internals) const
    {
        CV_UNUSED(requiredOutputs); CV_UNUSED(internals);
        std::vector<int> outShape(4);
        outShape[0] = inputs[0][0];  // batch size
        outShape[1] = inputs[0][1];  // number of channels
        outShape[2] = outHeight;
        outShape[3] = outWidth;
        outputs.assign(1, outShape);
        return false;
    }

    // This implementation is based on a reference implementation from
    // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
    virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals)
    {
        CV_UNUSED(internals);
        cv::Mat& inp = *inputs[0];
        cv::Mat& out = outputs[0];
        // NOTE(review): assumes float (CV_32F), densely packed NCHW blobs —
        // confirm against the dnn module's blob layout.
        const float* inpData = (float*)inp.data;
        float* outData = (float*)out.data;

        const int batchSize = inp.size[0];
        const int numChannels = inp.size[1];
        const int inpHeight = inp.size[2];
        const int inpWidth = inp.size[3];

        // Non-align-corners mapping: src = dst * inSize / outSize.
        float heightScale = static_cast<float>(inpHeight) / outHeight;
        float widthScale = static_cast<float>(inpWidth) / outWidth;
        for (int b = 0; b < batchSize; ++b)
        {
            for (int y = 0; y < outHeight; ++y)
            {
                float input_y = y * heightScale;
                int y0 = static_cast<int>(std::floor(input_y));
                int y1 = std::min(y0 + 1, inpHeight - 1);  // clamp at the bottom border
                for (int x = 0; x < outWidth; ++x)
                {
                    float input_x = x * widthScale;
                    int x0 = static_cast<int>(std::floor(input_x));
                    int x1 = std::min(x0 + 1, inpWidth - 1);  // clamp at the right border
                    for (int c = 0; c < numChannels; ++c)
                    {
                        // Bilinear blend of the four neighbouring source pixels.
                        float interpolation =
                            inpData[offset(inp.size, c, x0, y0, b)] * (1 - (input_y - y0)) * (1 - (input_x - x0)) +
                            inpData[offset(inp.size, c, x0, y1, b)] * (input_y - y0) * (1 - (input_x - x0)) +
                            inpData[offset(inp.size, c, x1, y0, b)] * (1 - (input_y - y0)) * (input_x - x0) +
                            inpData[offset(inp.size, c, x1, y1, b)] * (input_y - y0) * (input_x - x0);
                        outData[offset(out.size, c, x, y, b)] = interpolation;
                    }
                }
            }
        }
    }

    // InputArray-based overload is unused in this sample.
    virtual void forward(cv::InputArrayOfArrays, cv::OutputArrayOfArrays, cv::OutputArrayOfArrays) {}

private:
    // Linear element index inside an NCHW blob for (batch b, channel c, row y, col x).
    static inline int offset(const cv::MatSize& size, int c, int x, int y, int b)
    {
        return x + size[3] * (y + size[2] * (c + size[1] * b));
    }

    int outWidth, outHeight;
};
//! [ResizeBilinearLayer]
//! [Register a custom layer]
#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS macro
int main(int argc, char** argv)
{
    // Register the custom layer under the type name "MyType" before any model
    // is parsed, so the importer can instantiate it when that type is seen.
    CV_DNN_REGISTER_LAYER_CLASS(MyType, MyLayer);
    // ...
//! [Register a custom layer]

    CV_UNUSED(argc); CV_UNUSED(argv);

    // NOTE: the model paths below are placeholders for the tutorial; replace
    // them with real files to actually run the sample.
    //! [Register InterpLayer]
    CV_DNN_REGISTER_LAYER_CLASS(Interp, InterpLayer);
    cv::dnn::Net caffeNet = cv::dnn::readNet("/path/to/config.prototxt", "/path/to/weights.caffemodel");
    //! [Register InterpLayer]

    //! [Register ResizeBilinearLayer]
    CV_DNN_REGISTER_LAYER_CLASS(ResizeBilinear, ResizeBilinearLayer);
    cv::dnn::Net tfNet = cv::dnn::readNet("/path/to/graph.pb");
    //! [Register ResizeBilinearLayer]
}
// Factory: hands ownership of a freshly constructed MyLayer to the caller.
cv::Ptr<cv::dnn::Layer> MyLayer::create(cv::dnn::LayerParams& params)
{
    cv::Ptr<cv::dnn::Layer> layer(new MyLayer(params));
    return layer;
}
// Stub constructor: this sample's skeleton layer keeps no state of its own.
MyLayer::MyLayer(const cv::dnn::LayerParams&) {}
// Stub: reports no output shapes; a real layer fills `outputs`/`internals`.
bool MyLayer::getMemoryShapes(const std::vector<std::vector<int> >&, const int,
                              std::vector<std::vector<int> >&,
                              std::vector<std::vector<int> >&) const { return false; }
// Stub: a real layer would compute its outputs here.
void MyLayer::forward(std::vector<cv::Mat*>&, std::vector<cv::Mat>&, std::vector<cv::Mat>&) {}
// Stub: one-time post-shape-inference initialization hook (unused here).
void MyLayer::finalize(const std::vector<cv::Mat*>&, std::vector<cv::Mat>&) {}
// Stub: InputArray-based overload of forward (unused in this sample).
void MyLayer::forward(cv::InputArrayOfArrays, cv::OutputArrayOfArrays, cv::OutputArrayOfArrays) {}

View File

@ -214,7 +214,7 @@ void postprocess(Mat& frame, const std::vector<Mat>& outs, Net& net)
} }
} }
std::vector<int> indices; std::vector<int> indices;
NMSBoxes(boxes, confidences, confThreshold, 0.4, indices); NMSBoxes(boxes, confidences, confThreshold, 0.4f, indices);
for (size_t i = 0; i < indices.size(); ++i) for (size_t i = 0; i < indices.size(); ++i)
{ {
int idx = indices[i]; int idx = indices[i];