Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Author: Alexander Alekhin
Date:   2018-12-05 19:54:52 +00:00
Commit: e82e672a93
26 changed files with 302 additions and 330 deletions


@@ -180,17 +180,6 @@
volume = {9},
publisher = {Walter de Gruyter}
}
-@inproceedings{DD02,
-author = {Durand, Fr{\'e}do and Dorsey, Julie},
-title = {Fast bilateral filtering for the display of high-dynamic-range images},
-booktitle = {ACM Transactions on Graphics (TOG)},
-year = {2002},
-pages = {257--266},
-volume = {21},
-number = {3},
-publisher = {ACM},
-url = {https://www.researchgate.net/profile/Julie_Dorsey/publication/220184746_Fast_Bilateral_Filtering_for_the_Display_of_High_-_dynamic_-_range_Images/links/54566b000cf26d5090a95f96/Fast-Bilateral-Filtering-for-the-Display-of-High-dynamic-range-Images.pdf}
-}
@inproceedings{DM03,
author = {Drago, Fr{\'e}d{\'e}ric and Myszkowski, Karol and Annen, Thomas and Chiba, Norishige},
title = {Adaptive logarithmic mapping for displaying high contrast scenes},


@@ -85,10 +85,8 @@ we will later have to clip the data in order to avoid overflow.
@code{.py}
# Tonemap HDR image
-tonemap1 = cv.createTonemapDurand(gamma=2.2)
+tonemap1 = cv.createTonemap(gamma=2.2)
res_debevec = tonemap1.process(hdr_debevec.copy())
-tonemap2 = cv.createTonemapDurand(gamma=1.3)
-res_robertson = tonemap2.process(hdr_robertson.copy())
@endcode
### 4. Merge exposures using Mertens fusion
@@ -173,5 +171,5 @@ Additional Resources
Exercises
---------
-1. Try all tonemap algorithms: cv::TonemapDrago, cv::TonemapDurand, cv::TonemapMantiuk and cv::TonemapReinhard
+1. Try all tonemap algorithms: cv::TonemapDrago, cv::TonemapMantiuk and cv::TonemapReinhard
2. Try changing the parameters in the HDR calibration and tonemap methods.


@@ -171,7 +171,7 @@ Now it's time to look at the results. Note that HDR image can't be stored in one
formats, so we save it to Radiance image (.hdr). Also all HDR imaging functions return results in
[0, 1] range so we should multiply result by 255.
-You can try other tonemap algorithms: cv::TonemapDrago, cv::TonemapDurand, cv::TonemapMantiuk and cv::TonemapReinhard
+You can try other tonemap algorithms: cv::TonemapDrago, cv::TonemapMantiuk and cv::TonemapReinhard
You can also adjust the parameters in the HDR calibration and tonemap methods for your own photos.
Results
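For reference, a minimal C++ sketch of the tonemapping step with the plain `cv::createTonemap` call that replaces `createTonemapDurand` in these tutorials. It assumes an `hdr` Mat (CV_32FC3) produced by the merge stage, and the output file names are only illustrative:

```cpp
#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

// Tonemap a linear HDR result (CV_32FC3) and save an 8-bit preview.
// "hdr" is assumed to come from MergeDebevec/MergeRobertson as in the tutorial.
void saveTonemapped(const cv::Mat& hdr)
{
    cv::Ptr<cv::Tonemap> tonemap = cv::createTonemap(2.2f);  // plain gamma tonemapper
    cv::Mat ldr;
    tonemap->process(hdr, ldr);          // output is in the [0, 1] range
    cv::Mat ldr8;
    ldr.convertTo(ldr8, CV_8UC3, 255);   // scale to [0, 255] before writing a PNG
    cv::imwrite("ldr_debevec.png", ldr8);
    cv::imwrite("result.hdr", hdr);      // Radiance .hdr keeps the float data
}
```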


@@ -1333,7 +1333,7 @@ struct InRange_SIMD
}
};
-#if CV_SIMD128
+#if CV_SIMD
template <>
struct InRange_SIMD<uchar>
@@ -1342,16 +1342,17 @@ struct InRange_SIMD<uchar>
uchar * dst, int len) const
{
int x = 0;
-const int width = v_uint8x16::nlanes;
+const int width = v_uint8::nlanes;
for (; x <= len - width; x += width)
{
-v_uint8x16 values = v_load(src1 + x);
-v_uint8x16 low = v_load(src2 + x);
-v_uint8x16 high = v_load(src3 + x);
+v_uint8 values = vx_load(src1 + x);
+v_uint8 low = vx_load(src2 + x);
+v_uint8 high = vx_load(src3 + x);
v_store(dst + x, (values >= low) & (high >= values));
}
+vx_cleanup();
return x;
}
};
@@ -1363,16 +1364,17 @@ struct InRange_SIMD<schar>
uchar * dst, int len) const
{
int x = 0;
-const int width = v_int8x16::nlanes;
+const int width = v_int8::nlanes;
for (; x <= len - width; x += width)
{
-v_int8x16 values = v_load(src1 + x);
-v_int8x16 low = v_load(src2 + x);
-v_int8x16 high = v_load(src3 + x);
+v_int8 values = vx_load(src1 + x);
+v_int8 low = vx_load(src2 + x);
+v_int8 high = vx_load(src3 + x);
v_store((schar*)(dst + x), (values >= low) & (high >= values));
}
+vx_cleanup();
return x;
}
};
@@ -1384,20 +1386,21 @@ struct InRange_SIMD<ushort>
uchar * dst, int len) const
{
int x = 0;
-const int width = v_uint16x8::nlanes * 2;
+const int width = v_uint16::nlanes * 2;
for (; x <= len - width; x += width)
{
-v_uint16x8 values1 = v_load(src1 + x);
-v_uint16x8 low1 = v_load(src2 + x);
-v_uint16x8 high1 = v_load(src3 + x);
-v_uint16x8 values2 = v_load(src1 + x + v_uint16x8::nlanes);
-v_uint16x8 low2 = v_load(src2 + x + v_uint16x8::nlanes);
-v_uint16x8 high2 = v_load(src3 + x + v_uint16x8::nlanes);
+v_uint16 values1 = vx_load(src1 + x);
+v_uint16 low1 = vx_load(src2 + x);
+v_uint16 high1 = vx_load(src3 + x);
+v_uint16 values2 = vx_load(src1 + x + v_uint16::nlanes);
+v_uint16 low2 = vx_load(src2 + x + v_uint16::nlanes);
+v_uint16 high2 = vx_load(src3 + x + v_uint16::nlanes);
v_store(dst + x, v_pack((values1 >= low1) & (high1 >= values1), (values2 >= low2) & (high2 >= values2)));
}
+vx_cleanup();
return x;
}
};
@@ -1409,20 +1412,21 @@ struct InRange_SIMD<short>
uchar * dst, int len) const
{
int x = 0;
-const int width = (int)v_int16x8::nlanes * 2;
+const int width = (int)v_int16::nlanes * 2;
for (; x <= len - width; x += width)
{
-v_int16x8 values1 = v_load(src1 + x);
-v_int16x8 low1 = v_load(src2 + x);
-v_int16x8 high1 = v_load(src3 + x);
-v_int16x8 values2 = v_load(src1 + x + v_int16x8::nlanes);
-v_int16x8 low2 = v_load(src2 + x + v_int16x8::nlanes);
-v_int16x8 high2 = v_load(src3 + x + v_int16x8::nlanes);
+v_int16 values1 = vx_load(src1 + x);
+v_int16 low1 = vx_load(src2 + x);
+v_int16 high1 = vx_load(src3 + x);
+v_int16 values2 = vx_load(src1 + x + v_int16::nlanes);
+v_int16 low2 = vx_load(src2 + x + v_int16::nlanes);
+v_int16 high2 = vx_load(src3 + x + v_int16::nlanes);
v_store((schar*)(dst + x), v_pack((values1 >= low1) & (high1 >= values1), (values2 >= low2) & (high2 >= values2)));
}
+vx_cleanup();
return x;
}
};
@@ -1434,20 +1438,21 @@ struct InRange_SIMD<int>
uchar * dst, int len) const
{
int x = 0;
-const int width = (int)v_int32x4::nlanes * 2;
+const int width = (int)v_int32::nlanes * 2;
for (; x <= len - width; x += width)
{
-v_int32x4 values1 = v_load(src1 + x);
-v_int32x4 low1 = v_load(src2 + x);
-v_int32x4 high1 = v_load(src3 + x);
-v_int32x4 values2 = v_load(src1 + x + v_int32x4::nlanes);
-v_int32x4 low2 = v_load(src2 + x + v_int32x4::nlanes);
-v_int32x4 high2 = v_load(src3 + x + v_int32x4::nlanes);
+v_int32 values1 = vx_load(src1 + x);
+v_int32 low1 = vx_load(src2 + x);
+v_int32 high1 = vx_load(src3 + x);
+v_int32 values2 = vx_load(src1 + x + v_int32::nlanes);
+v_int32 low2 = vx_load(src2 + x + v_int32::nlanes);
+v_int32 high2 = vx_load(src3 + x + v_int32::nlanes);
v_pack_store(dst + x, v_reinterpret_as_u16(v_pack((values1 >= low1) & (high1 >= values1), (values2 >= low2) & (high2 >= values2))));
}
+vx_cleanup();
return x;
}
};
@@ -1459,20 +1464,21 @@ struct InRange_SIMD<float>
uchar * dst, int len) const
{
int x = 0;
-const int width = (int)v_float32x4::nlanes * 2;
+const int width = (int)v_float32::nlanes * 2;
for (; x <= len - width; x += width)
{
-v_float32x4 values1 = v_load(src1 + x);
-v_float32x4 low1 = v_load(src2 + x);
-v_float32x4 high1 = v_load(src3 + x);
-v_float32x4 values2 = v_load(src1 + x + v_float32x4::nlanes);
-v_float32x4 low2 = v_load(src2 + x + v_float32x4::nlanes);
-v_float32x4 high2 = v_load(src3 + x + v_float32x4::nlanes);
+v_float32 values1 = vx_load(src1 + x);
+v_float32 low1 = vx_load(src2 + x);
+v_float32 high1 = vx_load(src3 + x);
+v_float32 values2 = vx_load(src1 + x + v_float32::nlanes);
+v_float32 low2 = vx_load(src2 + x + v_float32::nlanes);
+v_float32 high2 = vx_load(src3 + x + v_float32::nlanes);
v_pack_store(dst + x, v_pack(v_reinterpret_as_u32((values1 >= low1) & (high1 >= values1)), v_reinterpret_as_u32((values2 >= low2) & (high2 >= values2))));
}
+vx_cleanup();
return x;
}
};
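The change above moves these kernels from fixed-width types (`v_uint8x16`, `v_load`) to the width-agnostic universal intrinsics (`v_uint8`, `vx_load`) guarded by `CV_SIMD`, so the same code uses the widest register size the build enables. A minimal sketch of the same pattern outside OpenCV's sources, with a hypothetical helper name and a scalar tail loop added for completeness:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/core/hal/intrin.hpp>

// Hypothetical helper that mirrors the InRange_SIMD pattern above:
// dst[i] = 255 if low[i] <= src[i] <= high[i], else 0.
static void inRange8u(const uchar* src, const uchar* low, const uchar* high,
                      uchar* dst, int len)
{
    int x = 0;
#if CV_SIMD
    const int width = cv::v_uint8::nlanes;        // lane count depends on the build (SSE/AVX2/NEON/...)
    for (; x <= len - width; x += width)
    {
        cv::v_uint8 v  = cv::vx_load(src + x);    // vx_* loads use the widest enabled register size
        cv::v_uint8 lo = cv::vx_load(low + x);
        cv::v_uint8 hi = cv::vx_load(high + x);
        cv::v_store(dst + x, (v >= lo) & (hi >= v));
    }
    cv::vx_cleanup();                             // matches the vx_cleanup() calls added in the diff
#endif
    for (; x < len; x++)                          // scalar tail
        dst[x] = (uchar)(low[x] <= src[x] && src[x] <= high[x] ? 255 : 0);
}
```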


@@ -77,6 +77,15 @@ CV__DNN_INLINE_NS_BEGIN
static Ptr<Layer> create(const LayerParams &params);
};
+/**
+ * Constant layer produces the same data blob at an every forward pass.
+ */
+class CV_EXPORTS ConstLayer : public Layer
+{
+public:
+static Ptr<Layer> create(const LayerParams &params);
+};
//! LSTM recurrent layer
class CV_EXPORTS LSTMLayer : public Layer
{


@@ -88,6 +88,9 @@ CV__DNN_INLINE_NS_BEGIN
DNN_TARGET_FPGA
};
+CV_EXPORTS std::vector< std::pair<Backend, Target> > getAvailableBackends();
+CV_EXPORTS std::vector<Target> getAvailableTargets(Backend be);
/** @brief This class provides all data needed to initialize layer.
*
* It includes dictionary with scalar params (which can be read by using Dict interface),
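A hedged sketch of how the two new query functions might be used by application code to pick a runnable target at startup; the fallback policy here is only an example:

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>

// Sketch: list every backend/target pair the current build can actually run,
// then prefer an OpenCL target for the OpenCV backend when one is available.
void configureNet(cv::dnn::Net& net)
{
    using namespace cv::dnn;
    for (const std::pair<Backend, Target>& p : getAvailableBackends())
        std::cout << "backend " << p.first << " target " << p.second << std::endl;

    std::vector<Target> targets = getAvailableTargets(DNN_BACKEND_OPENCV);
    Target target = DNN_TARGET_CPU;
    for (size_t i = 0; i < targets.size(); i++)
        if (targets[i] == DNN_TARGET_OPENCL)
            target = DNN_TARGET_OPENCL;

    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(target);
}
```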


@@ -6,7 +6,7 @@
#define OPENCV_DNN_VERSION_HPP
/// Use with major OpenCV version only.
-#define OPENCV_DNN_API_VERSION 20181121
+#define OPENCV_DNN_API_VERSION 20181205
#if !defined CV_DOXYGEN && !defined CV_DNN_DONT_ADD_INLINE_NS
#define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)


@@ -31,23 +31,6 @@ public:
void processNet(std::string weights, std::string proto, std::string halide_scheduler,
const Mat& input, const std::string& outputLayer = "")
{
-if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
-{
-#if defined(HAVE_OPENCL)
-if (!cv::ocl::useOpenCL())
-#endif
-{
-throw cvtest::SkipTestException("OpenCL is not available/disabled in OpenCV");
-}
-}
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-{
-if (!checkIETarget(DNN_TARGET_MYRIAD))
-{
-throw SkipTestException("Myriad is not available/disabled in OpenCV");
-}
-}
randu(input, 0.0f, 1.0f);
weights = findDataFile(weights, false);


@@ -85,6 +85,111 @@ using std::map;
using std::make_pair;
using std::set;
//==================================================================================================
class BackendRegistry
{
public:
typedef std::vector< std::pair<Backend, Target> > BackendsList;
const BackendsList & getBackends() const { return backends; }
static BackendRegistry & getRegistry()
{
static BackendRegistry impl;
return impl;
}
private:
BackendRegistry()
{
#ifdef HAVE_HALIDE
backends.push_back(std::make_pair(DNN_BACKEND_HALIDE, DNN_TARGET_CPU));
# ifdef HAVE_OPENCL
if (cv::ocl::useOpenCL())
backends.push_back(std::make_pair(DNN_BACKEND_HALIDE, DNN_TARGET_OPENCL));
# endif
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
if (checkIETarget(DNN_TARGET_CPU))
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU));
if (checkIETarget(DNN_TARGET_MYRIAD))
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD));
if (checkIETarget(DNN_TARGET_FPGA))
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_FPGA));
# ifdef HAVE_OPENCL
if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
{
if (checkIETarget(DNN_TARGET_OPENCL))
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL));
if (checkIETarget(DNN_TARGET_OPENCL_FP16))
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16));
}
# endif
#endif // HAVE_INF_ENGINE
#ifdef HAVE_OPENCL
if (cv::ocl::useOpenCL())
{
backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL));
backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16));
}
#endif
backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
#ifdef HAVE_VULKAN
backends.push_back(std::make_pair(DNN_BACKEND_VKCOM, DNN_TARGET_VULKAN)); // TODO Add device check
#endif
}
static inline bool checkIETarget(int target)
{
#ifndef HAVE_INF_ENGINE
return false;
#else
cv::dnn::Net net;
cv::dnn::LayerParams lp;
net.addLayerToPrev("testLayer", "Identity", lp);
net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
net.setPreferableTarget(target);
static int inpDims[] = {1, 2, 3, 4};
net.setInput(cv::Mat(4, &inpDims[0], CV_32FC1, cv::Scalar(0)));
try
{
net.forward();
}
catch(...)
{
return false;
}
return true;
#endif
}
BackendsList backends;
};
std::vector< std::pair<Backend, Target> > getAvailableBackends()
{
return BackendRegistry::getRegistry().getBackends();
}
std::vector<Target> getAvailableTargets(Backend be)
{
if (be == DNN_BACKEND_DEFAULT)
be = (Backend)PARAM_DNN_BACKEND_DEFAULT;
std::vector<Target> result;
const BackendRegistry::BackendsList all_backends = getAvailableBackends();
for(BackendRegistry::BackendsList::const_iterator i = all_backends.begin(); i != all_backends.end(); ++i )
{
if (i->first == be)
result.push_back(i->second);
}
return result;
}
//==================================================================================================
namespace
{
typedef std::vector<MatShape> ShapesVec;


@@ -112,6 +112,7 @@ void initializeLayerFactory()
CV_DNN_REGISTER_LAYER_CLASS(Dropout, BlankLayer);
CV_DNN_REGISTER_LAYER_CLASS(Identity, BlankLayer);
CV_DNN_REGISTER_LAYER_CLASS(Silence, BlankLayer);
+CV_DNN_REGISTER_LAYER_CLASS(Const, ConstLayer);
CV_DNN_REGISTER_LAYER_CLASS(Crop, CropLayer);
CV_DNN_REGISTER_LAYER_CLASS(Eltwise, EltwiseLayer);


@@ -0,0 +1,68 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#include "../precomp.hpp"
#include "layers_common.hpp"
#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
#endif
namespace cv { namespace dnn {
class ConstLayerImpl CV_FINAL : public ConstLayer
{
public:
ConstLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
CV_Assert(blobs.size() == 1);
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.empty());
outputs.assign(1, shape(blobs[0]));
return false;
}
#ifdef HAVE_OPENCL
bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
std::vector<UMat> outputs;
outs.getUMatVector(outputs);
if (outs.depth() == CV_16S)
convertFp16(blobs[0], outputs[0]);
else
blobs[0].copyTo(outputs[0]);
return true;
}
#endif
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
blobs[0].copyTo(outputs[0]);
}
};
Ptr<Layer> ConstLayer::create(const LayerParams& params)
{
return Ptr<Layer>(new ConstLayerImpl(params));
}
}} // namespace cv::dnn
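A short sketch of how a constant blob could be fed into a network through this layer from user code, mirroring what the TensorFlow importer change below does for constant Concat inputs; the helper function name is illustrative:

```cpp
#include <opencv2/dnn.hpp>

// Sketch: register a constant blob as a "Const" layer so that other layers
// can consume it as a regular input.
void addConstInput(cv::dnn::Net& net, const std::string& name, const cv::Mat& blob)
{
    cv::dnn::LayerParams lp;
    lp.name = name;
    lp.type = "Const";          // resolved to ConstLayer via the factory registration above
    lp.blobs.push_back(blob);   // ConstLayerImpl expects exactly one blob
    net.addLayer(lp.name, lp.type, lp);
}
```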


@@ -1266,14 +1266,31 @@ void TFImporter::populateNet(Net dstNet)
axis = toNCHW(axis);
layerParams.set("axis", axis);
-int id = dstNet.addLayer(name, "Concat", layerParams);
-layer_id[name] = id;
+// input(0) or input(n-1) is concat_dim
int from = (type == "Concat" ? 1 : 0);
int to = (type == "Concat" ? layer.input_size() : layer.input_size() - 1);
-// input(0) or input(n-1) is concat_dim
+for (int ii = from; ii < to; ii++)
+{
+Pin inp = parsePin(layer.input(ii));
+if (layer_id.find(inp.name) == layer_id.end())
+{
+// There are constant inputs.
+LayerParams lp;
+lp.name = inp.name;
+lp.type = "Const";
+lp.blobs.resize(1);
+blobFromTensor(getConstBlob(layer, value_id, ii), lp.blobs.back());
+CV_Assert_N(!lp.blobs[0].empty(), lp.blobs[0].type() == CV_32F);
+int constInpId = dstNet.addLayer(lp.name, lp.type, lp);
+layer_id[lp.name] = constInpId;
+}
+}
+int id = dstNet.addLayer(name, "Concat", layerParams);
+layer_id[name] = id;
for (int ii = from; ii < to; ii++)
{
Pin inp = parsePin(layer.input(ii));


@@ -300,10 +300,11 @@ INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_ResNet50,
typedef testing::TestWithParam<Target> Reproducibility_SqueezeNet_v1_1;
TEST_P(Reproducibility_SqueezeNet_v1_1, Accuracy)
{
+int targetId = GetParam();
+if(targetId == DNN_TARGET_OPENCL_FP16)
+throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/squeezenet_v1.1.prototxt", false),
findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
-int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
@@ -324,7 +325,8 @@ TEST_P(Reproducibility_SqueezeNet_v1_1, Accuracy)
Mat ref = blobFromNPY(_tf("squeezenet_v1.1_prob.npy"));
normAssert(ref, out);
}
-INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_SqueezeNet_v1_1, availableDnnTargets());
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_SqueezeNet_v1_1,
+testing::ValuesIn(getAvailableTargets(DNN_BACKEND_OPENCV)));
TEST(Reproducibility_AlexNet_fp16, Accuracy)
{


@@ -191,30 +191,6 @@ static inline void normAssertDetections(cv::Mat ref, cv::Mat out, const char *co
testBoxes, comment, confThreshold, scores_diff, boxes_iou_diff);
}
-static inline bool checkIETarget(int target)
-{
-#ifndef HAVE_INF_ENGINE
-return false;
-#else
-cv::dnn::Net net;
-cv::dnn::LayerParams lp;
-net.addLayerToPrev("testLayer", "Identity", lp);
-net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
-net.setPreferableTarget(target);
-static int inpDims[] = {1, 2, 3, 4};
-net.setInput(cv::Mat(4, &inpDims[0], CV_32FC1, cv::Scalar(0)));
-try
-{
-net.forward();
-}
-catch(...)
-{
-return false;
-}
-return true;
-#endif
-}
static inline bool readFileInMemory(const std::string& filename, std::string& content)
{
std::ios::openmode mode = std::ios::in | std::ios::binary;
@@ -239,52 +215,36 @@ namespace opencv_test {
using namespace cv::dnn;
static inline
-testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargets(
+testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
bool withInferenceEngine = true,
bool withHalide = false,
bool withCpuOCV = true,
bool withVkCom = true
)
{
-std::vector<tuple<Backend, Target> > targets;
-#ifdef HAVE_HALIDE
+std::vector< tuple<Backend, Target> > targets;
+std::vector< Target > available;
if (withHalide)
{
-targets.push_back(make_tuple(DNN_BACKEND_HALIDE, DNN_TARGET_CPU));
-#ifdef HAVE_OPENCL
-if (cv::ocl::useOpenCL())
-targets.push_back(make_tuple(DNN_BACKEND_HALIDE, DNN_TARGET_OPENCL));
-#endif
+available = getAvailableTargets(DNN_BACKEND_HALIDE);
+for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+targets.push_back(make_tuple(DNN_BACKEND_HALIDE, *i));
}
-#endif
-#ifdef HAVE_INF_ENGINE
if (withInferenceEngine)
{
-targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU));
-#ifdef HAVE_OPENCL
-if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
-{
-targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL));
-targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16));
-}
-#endif
-if (checkIETarget(DNN_TARGET_MYRIAD))
-targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD));
+available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
+for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
}
-#endif
-if (withCpuOCV)
-targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
-#ifdef HAVE_OPENCL
-if (cv::ocl::useOpenCL())
{
-targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL));
-targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16));
+available = getAvailableTargets(DNN_BACKEND_OPENCV);
+for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+{
+if (!withCpuOCV && *i == DNN_TARGET_CPU)
+continue;
+targets.push_back(make_tuple(DNN_BACKEND_OPENCV, *i));
+}
}
-#endif
-#ifdef HAVE_VULKAN
-if (withVkCom)
-targets.push_back(make_tuple(DNN_BACKEND_VKCOM, DNN_TARGET_VULKAN));
-#endif
if (targets.empty()) // validate at least CPU mode
targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
return testing::ValuesIn(targets);
@@ -296,21 +256,6 @@ testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargets
namespace opencv_test {
using namespace cv::dnn;
-static inline
-testing::internal::ParamGenerator<Target> availableDnnTargets()
-{
-static std::vector<Target> targets;
-if (targets.empty())
-{
-targets.push_back(DNN_TARGET_CPU);
-#ifdef HAVE_OPENCL
-if (cv::ocl::useOpenCL())
-targets.push_back(DNN_TARGET_OPENCL);
-#endif
-}
-return testing::ValuesIn(targets);
-}
class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
{
public:
@@ -339,23 +284,10 @@
}
}
static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
{
-if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
-{
-#ifdef HAVE_OPENCL
-if (!cv::ocl::useOpenCL())
-#endif
-{
-throw SkipTestException("OpenCL is not available/disabled in OpenCV");
-}
-}
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
{
-if (!checkIETarget(DNN_TARGET_MYRIAD))
-{
-throw SkipTestException("Myriad is not available/disabled in OpenCV");
-}
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
if (inp && ref && inp->size[0] != 1)
{


@@ -55,9 +55,11 @@ static std::string _tf(TString filename)
typedef testing::TestWithParam<Target> Reproducibility_GoogLeNet;
TEST_P(Reproducibility_GoogLeNet, Batching)
{
+const int targetId = GetParam();
+if(targetId == DNN_TARGET_OPENCL_FP16)
+throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
-int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
@@ -84,9 +86,11 @@ TEST_P(Reproducibility_GoogLeNet, Batching)
TEST_P(Reproducibility_GoogLeNet, IntermediateBlobs)
{
+const int targetId = GetParam();
+if(targetId == DNN_TARGET_OPENCL_FP16)
+throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
-int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
@@ -113,9 +117,11 @@ TEST_P(Reproducibility_GoogLeNet, IntermediateBlobs)
TEST_P(Reproducibility_GoogLeNet, SeveralCalls)
{
+const int targetId = GetParam();
+if(targetId == DNN_TARGET_OPENCL_FP16)
+throw SkipTestException("This test does not support FP16");
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
-int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
@@ -143,6 +149,7 @@ TEST_P(Reproducibility_GoogLeNet, SeveralCalls)
normAssert(outs[0], ref, "", 1E-4, 1E-2);
}
-INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_GoogLeNet, availableDnnTargets());
+INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_GoogLeNet,
+testing::ValuesIn(getAvailableTargets(DNN_BACKEND_OPENCV)));
}} // namespace


@@ -203,7 +203,8 @@ TEST_P(DNNTestOpenVINO, models)
std::map<std::string, cv::Mat> inputsMap;
std::map<std::string, cv::Mat> ieOutputsMap, cvOutputsMap;
// Single Myriad device cannot be shared across multiple processes.
-resetMyriadDevice();
+if (target == DNN_TARGET_MYRIAD)
+resetMyriadDevice();
runIE(target, xmlPath, binPath, inputsMap, ieOutputsMap);
runCV(target, xmlPath, binPath, inputsMap, cvOutputsMap);
@@ -245,27 +246,10 @@ static testing::internal::ParamGenerator<String> intelModels()
return ValuesIn(modelsNames);
}
-static testing::internal::ParamGenerator<Target> dnnDLIETargets()
-{
-std::vector<Target> targets;
-targets.push_back(DNN_TARGET_CPU);
-#ifdef HAVE_OPENCL
-if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
-{
-targets.push_back(DNN_TARGET_OPENCL);
-targets.push_back(DNN_TARGET_OPENCL_FP16);
-}
-#endif
-if (checkIETarget(DNN_TARGET_MYRIAD))
-targets.push_back(DNN_TARGET_MYRIAD);
-if (checkIETarget(DNN_TARGET_FPGA))
-targets.push_back(DNN_TARGET_FPGA);
-return testing::ValuesIn(targets);
-}
-INSTANTIATE_TEST_CASE_P(/**/, DNNTestOpenVINO, Combine(
-dnnDLIETargets(), intelModels()
-));
+INSTANTIATE_TEST_CASE_P(/**/,
+DNNTestOpenVINO,
+Combine(testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)), intelModels())
+);
}}
#endif // HAVE_INF_ENGINE


@@ -157,8 +157,6 @@ TEST_P(setInput, normalization)
const int target = get<1>(get<3>(GetParam()));
const bool kSwapRB = true;
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD && !checkIETarget(DNN_TARGET_MYRIAD))
-throw SkipTestException("Myriad is not available/disabled in OpenCV");
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16 && dtype != CV_32F)
throw SkipTestException("");
if (backend == DNN_BACKEND_VKCOM && dtype != CV_32F)


@@ -136,6 +136,7 @@ TEST_P(Test_TensorFlow_layers, padding)
runTensorFlowNet("padding_same");
runTensorFlowNet("padding_valid");
runTensorFlowNet("spatial_padding");
+runTensorFlowNet("keras_pad_concat");
}
TEST_P(Test_TensorFlow_layers, eltwise_add_mul)


@@ -4659,7 +4659,7 @@ LineIterator it2 = it;
vector<Vec3b> buf(it.count);
for(int i = 0; i < it.count; i++, ++it)
-buf[i] = *(const Vec3b)*it;
+buf[i] = *(const Vec3b*)*it;
// alternative way of iterating through the line
for(int i = 0; i < it2.count; i++, ++it2)
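The documentation snippet above is only a fragment; a self-contained sketch of the corrected LineIterator usage (assuming an 8-bit, 3-channel image) could look like this:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch: collect the pixels of a line segment from a BGR image,
// using the corrected cast from the documentation snippet above.
std::vector<cv::Vec3b> samplePixelsOnLine(const cv::Mat& img, cv::Point pt1, cv::Point pt2)
{
    CV_Assert(img.type() == CV_8UC3);
    cv::LineIterator it(img, pt1, pt2, 8);      // 8-connected line
    std::vector<cv::Vec3b> buf(it.count);
    for (int i = 0; i < it.count; i++, ++it)
        buf[i] = *(const cv::Vec3b*)*it;        // *it is a uchar* to the current pixel
    return buf;
}
```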


@@ -376,43 +376,6 @@ results, default value is 0.85.
*/
CV_EXPORTS_W Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f);
-/** @brief This algorithm decomposes image into two layers: base layer and detail layer using bilateral filter
-and compresses contrast of the base layer thus preserving all the details.
-This implementation uses regular bilateral filter from opencv.
-Saturation enhancement is possible as in ocvTonemapDrago.
-For more information see @cite DD02 .
-*/
-class CV_EXPORTS_W TonemapDurand : public Tonemap
-{
-public:
-CV_WRAP virtual float getSaturation() const = 0;
-CV_WRAP virtual void setSaturation(float saturation) = 0;
-CV_WRAP virtual float getContrast() const = 0;
-CV_WRAP virtual void setContrast(float contrast) = 0;
-CV_WRAP virtual float getSigmaSpace() const = 0;
-CV_WRAP virtual void setSigmaSpace(float sigma_space) = 0;
-CV_WRAP virtual float getSigmaColor() const = 0;
-CV_WRAP virtual void setSigmaColor(float sigma_color) = 0;
-};
-/** @brief Creates TonemapDurand object
-@param gamma gamma value for gamma correction. See createTonemap
-@param contrast resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
-are maximum and minimum luminance values of the resulting image.
-@param saturation saturation enhancement value. See createTonemapDrago
-@param sigma_space bilateral filter sigma in color space
-@param sigma_color bilateral filter sigma in coordinate space
-*/
-CV_EXPORTS_W Ptr<TonemapDurand>
-createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation = 1.0f, float sigma_space = 2.0f, float sigma_color = 2.0f);
/** @brief This is a global tonemapping operator that models human visual system.


@@ -193,94 +193,6 @@ Ptr<TonemapDrago> createTonemapDrago(float gamma, float saturation, float bias)
return makePtr<TonemapDragoImpl>(gamma, saturation, bias);
}
class TonemapDurandImpl CV_FINAL : public TonemapDurand
{
public:
TonemapDurandImpl(float _gamma, float _contrast, float _saturation, float _sigma_color, float _sigma_space) :
name("TonemapDurand"),
gamma(_gamma),
contrast(_contrast),
saturation(_saturation),
sigma_color(_sigma_color),
sigma_space(_sigma_space)
{
}
void process(InputArray _src, OutputArray _dst) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
Mat src = _src.getMat();
CV_Assert(!src.empty());
_dst.create(src.size(), CV_32FC3);
Mat img = _dst.getMat();
Ptr<Tonemap> linear = createTonemap(1.0f);
linear->process(src, img);
Mat gray_img;
cvtColor(img, gray_img, COLOR_RGB2GRAY);
Mat log_img;
log_(gray_img, log_img);
Mat map_img;
bilateralFilter(log_img, map_img, -1, sigma_color, sigma_space);
double min, max;
minMaxLoc(map_img, &min, &max);
float scale = contrast / static_cast<float>(max - min);
exp(map_img * (scale - 1.0f) + log_img, map_img);
log_img.release();
mapLuminance(img, img, gray_img, map_img, saturation);
pow(img, 1.0f / gamma, img);
}
float getGamma() const CV_OVERRIDE { return gamma; }
void setGamma(float val) CV_OVERRIDE { gamma = val; }
float getSaturation() const CV_OVERRIDE { return saturation; }
void setSaturation(float val) CV_OVERRIDE { saturation = val; }
float getContrast() const CV_OVERRIDE { return contrast; }
void setContrast(float val) CV_OVERRIDE { contrast = val; }
float getSigmaColor() const CV_OVERRIDE { return sigma_color; }
void setSigmaColor(float val) CV_OVERRIDE { sigma_color = val; }
float getSigmaSpace() const CV_OVERRIDE { return sigma_space; }
void setSigmaSpace(float val) CV_OVERRIDE { sigma_space = val; }
void write(FileStorage& fs) const CV_OVERRIDE
{
writeFormat(fs);
fs << "name" << name
<< "gamma" << gamma
<< "contrast" << contrast
<< "sigma_color" << sigma_color
<< "sigma_space" << sigma_space
<< "saturation" << saturation;
}
void read(const FileNode& fn) CV_OVERRIDE
{
FileNode n = fn["name"];
CV_Assert(n.isString() && String(n) == name);
gamma = fn["gamma"];
contrast = fn["contrast"];
sigma_color = fn["sigma_color"];
sigma_space = fn["sigma_space"];
saturation = fn["saturation"];
}
protected:
String name;
float gamma, contrast, saturation, sigma_color, sigma_space;
};
Ptr<TonemapDurand> createTonemapDurand(float gamma, float contrast, float saturation, float sigma_color, float sigma_space)
{
return makePtr<TonemapDurandImpl>(gamma, contrast, saturation, sigma_color, sigma_space);
}
class TonemapReinhardImpl CV_FINAL : public TonemapReinhard
{
public:
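Since TonemapDurand is gone from this module, a Durand-style result can still be approximated with public OpenCV calls. The sketch below loosely follows the removed TonemapDurandImpl::process above (bilateral filter on the log-luminance, base-layer contrast compression); it is simplified (no saturation control, a small epsilon in place of the internal log_ helper) and is not a drop-in replacement:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/photo.hpp>

// Sketch of a Durand-style tonemapper built from public calls.
cv::Mat tonemapDurandLike(const cv::Mat& src, float gamma = 1.0f, float contrast = 4.0f,
                          float sigma_color = 2.0f, float sigma_space = 2.0f)
{
    CV_Assert(src.type() == CV_32FC3);
    cv::Mat img;
    cv::createTonemap(1.0f)->process(src, img);            // linear rescale to [0, 1]

    cv::Mat gray, log_lum, base;
    cv::cvtColor(img, gray, cv::COLOR_RGB2GRAY);
    cv::log(gray + 1e-4f, log_lum);                         // small offset avoids log(0)
    cv::bilateralFilter(log_lum, base, -1, sigma_color, sigma_space);

    double mn, mx;
    cv::minMaxLoc(base, &mn, &mx);
    float scale = contrast / (float)(mx - mn);
    cv::Mat mapped;
    cv::exp(base * (scale - 1.0f) + log_lum, mapped);       // compressed base + detail layer

    std::vector<cv::Mat> channels;
    cv::split(img, channels);
    for (int c = 0; c < 3; c++)
        channels[c] = channels[c].mul(mapped / (gray + 1e-4f));  // rescale color by new luminance
    cv::merge(channels, img);
    cv::pow(img, 1.0f / gamma, img);
    return img;
}
```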


@@ -105,12 +105,6 @@ TEST(Photo_Tonemap, regression)
result.convertTo(result, CV_8UC3, 255);
checkEqual(result, expected, 3, "Drago");
-Ptr<TonemapDurand> durand = createTonemapDurand(gamma);
-durand->process(img, result);
-loadImage(test_path + "durand.png", expected);
-result.convertTo(result, CV_8UC3, 255);
-checkEqual(result, expected, 3, "Durand");
Ptr<TonemapReinhard> reinhard = createTonemapReinhard(gamma);
reinhard->process(img, result);
loadImage(test_path + "reinhard.png", expected);


@@ -280,7 +280,7 @@ PERF_TEST_P( matchVector, affineBestOf2NearestVectorFeatures, testing::Combine(
if (pairwise_matches[i].src_img_idx < 0)
continue;
-EXPECT_GT(pairwise_matches[i].matches.size(), 200u);
+EXPECT_GT(pairwise_matches[i].matches.size(), 150u);
EXPECT_FALSE(pairwise_matches[i].H.empty());
++matches_count;
}


@@ -35,7 +35,7 @@ int main(int argc, char**argv)
//! [Tonemap HDR image]
Mat ldr;
-Ptr<TonemapDurand> tonemap = createTonemapDurand(2.2f);
+Ptr<Tonemap> tonemap = createTonemap(2.2f);
tonemap->process(hdr, ldr);
//! [Tonemap HDR image]


@@ -13,7 +13,7 @@ import org.opencv.photo.CalibrateDebevec;
import org.opencv.photo.MergeDebevec;
import org.opencv.photo.MergeMertens;
import org.opencv.photo.Photo;
-import org.opencv.photo.TonemapDurand;
+import org.opencv.photo.Tonemap;
class HDRImaging {
public void loadExposureSeq(String path, List<Mat> images, List<Float> times) {
@@ -71,7 +71,7 @@ class HDRImaging {
//! [Tonemap HDR image]
Mat ldr = new Mat();
-TonemapDurand tonemap = Photo.createTonemapDurand(2.2f, 4.0f, 1.0f, 2.0f, 2.0f);
+Tonemap tonemap = Photo.createTonemap(2.2f);
tonemap.process(hdr, ldr);
//! [Tonemap HDR image]


@@ -40,7 +40,7 @@ hdr = merge_debevec.process(images, times, response)
## [Make HDR image]
## [Tonemap HDR image]
-tonemap = cv.createTonemapDurand(2.2)
+tonemap = cv.createTonemap(2.2)
ldr = tonemap.process(hdr)
## [Tonemap HDR image]