mirror of https://github.com/opencv/opencv.git

commit 1dee705074
Merge branch '3.4' into merge-3.4
@@ -1,3 +1,6 @@
+set(OPENCV_JAVA_SOURCE_VERSION "" CACHE STRING "Java source version (javac Ant target)")
+set(OPENCV_JAVA_TARGET_VERSION "" CACHE STRING "Java target version (javac Ant target)")
+
 file(TO_CMAKE_PATH "$ENV{ANT_DIR}" ANT_DIR_ENV_PATH)
 file(TO_CMAKE_PATH "$ENV{ProgramFiles}" ProgramFiles_ENV_PATH)
 
@@ -78,9 +78,9 @@ endif()

 if(INF_ENGINE_TARGET)
   if(NOT INF_ENGINE_RELEASE)
-    message(WARNING "InferenceEngine version have not been set, 2018R4 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
+    message(WARNING "InferenceEngine version have not been set, 2018R5 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
   endif()
-  set(INF_ENGINE_RELEASE "2018040000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2018R2.0.2 -> 2018020002)")
+  set(INF_ENGINE_RELEASE "2018050000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2018R2.0.2 -> 2018020002)")
   set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
     INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
   )
@@ -12,7 +12,7 @@ Tutorial was written for the following versions of corresponding software:

 - Download and install Android Studio from https://developer.android.com/studio.

-- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-3.4.4-android-sdk.zip`).
+- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-3.4.5-android-sdk.zip`).

 - Download MobileNet object detection model from https://github.com/chuanqi305/MobileNet-SSD. We need a configuration file `MobileNetSSD_deploy.prototxt` and weights `MobileNetSSD_deploy.caffemodel`.
@@ -1278,6 +1278,16 @@ OPENCV_HAL_IMPL_AVX_CHECK_FLT(v_float64x4, 15)
 OPENCV_HAL_IMPL_AVX_MULADD(v_float32x8, ps)
 OPENCV_HAL_IMPL_AVX_MULADD(v_float64x4, pd)

+inline v_int32x8 v_fma(const v_int32x8& a, const v_int32x8& b, const v_int32x8& c)
+{
+    return a * b + c;
+}
+
+inline v_int32x8 v_muladd(const v_int32x8& a, const v_int32x8& b, const v_int32x8& c)
+{
+    return v_fma(a, b, c);
+}
+
 inline v_float32x8 v_invsqrt(const v_float32x8& x)
 {
     v_float32x8 half = x * v256_setall_f32(0.5);
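For context: the two new overloads above extend the universal-intrinsics fused multiply-add API to 8-lane int32 vectors, so generic SIMD code can call v_muladd without caring whether the lane type is float or int. A minimal usage sketch (not part of this commit), assuming an AVX2 build where CV_SIMD256 is enabled:

    #include <opencv2/core/hal/intrin.hpp>
    using namespace cv;

    // dst[i] = a[i]*b[i] + c[i], 8 int32 lanes per iteration on AVX2.
    void muladd_int32(const int* a, const int* b, const int* c, int* dst, int n)
    {
        int i = 0;
        for (; i <= n - 8; i += 8)
        {
            v_int32x8 va = v256_load(a + i);
            v_int32x8 vb = v256_load(b + i);
            v_int32x8 vc = v256_load(c + i);
            v_store(dst + i, v_muladd(va, vb, vc));  // new integer overload
        }
        for (; i < n; ++i)  // scalar tail for the remainder
            dst[i] = a[i] * b[i] + c[i];
    }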
@@ -750,6 +750,7 @@ CV__DNN_INLINE_NS_BEGIN
      * @brief Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
      * @param model    path to the file, dumped from Torch by using torch.save() function.
      * @param isBinary specifies whether the network was serialized in ascii mode or binary.
+     * @param evaluate specifies testing phase of network. If true, it's similar to evaluate() method in Torch.
      * @returns Net object.
      *
      * @note Ascii mode of Torch serializer is more preferable, because binary mode extensively use `long` type of C language,
@@ -771,7 +772,7 @@ CV__DNN_INLINE_NS_BEGIN
      *
      * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
      */
-    CV_EXPORTS_W Net readNetFromTorch(const String &model, bool isBinary = true);
+    CV_EXPORTS_W Net readNetFromTorch(const String &model, bool isBinary = true, bool evaluate = true);

     /**
      * @brief Read deep learning network represented in one of the supported formats.
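For context: the new `evaluate` parameter defaults to `true`, so existing callers keep the old behaviour (networks imported in testing phase); passing `false` keeps train-phase modules such as BatchNorm in training mode. A usage sketch (not part of this commit; the model path is a placeholder):

    #include <opencv2/dnn.hpp>

    // Default: import in evaluation (testing) phase, as before this change.
    cv::dnn::Net netEval  = cv::dnn::readNetFromTorch("model.t7", /*isBinary=*/true);

    // Explicitly keep the network in training phase.
    cv::dnn::Net netTrain = cv::dnn::readNetFromTorch("model.t7", /*isBinary=*/true,
                                                      /*evaluate=*/false);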
@@ -6,7 +6,7 @@
 #define OPENCV_DNN_VERSION_HPP

 /// Use with major OpenCV version only.
-#define OPENCV_DNN_API_VERSION 20181205
+#define OPENCV_DNN_API_VERSION 20181221

 #if !defined CV_DOXYGEN && !defined CV_DNN_DONT_ADD_INLINE_NS
 #define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
@@ -116,9 +116,15 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+            return !zeroDev && eps <= 1e-7f;
+#else
             return !zeroDev && (preferableTarget == DNN_TARGET_CPU || eps <= 1e-7f);
+#endif
         else
 #endif  // HAVE_INF_ENGINE
             return backendId == DNN_BACKEND_OPENCV;
     }
@@ -420,31 +420,30 @@ void ONNXImporter::populateNet(Net dstNet)
         }
         else if (layer_type == "Sub")
         {
-            Mat blob = (-1.0f) * getBlob(node_proto, constBlobs, 1);
-            blob = blob.reshape(1, 1);
+            Mat blob = getBlob(node_proto, constBlobs, 1);
             if (blob.total() == 1) {
                 layerParams.type = "Power";
-                layerParams.set("shift", blob.at<float>(0));
+                layerParams.set("shift", -blob.at<float>(0));
             }
             else {
                 layerParams.type = "Scale";
                 layerParams.set("has_bias", true);
-                layerParams.blobs.push_back(blob);
+                layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
             }
         }
         else if (layer_type == "Div")
         {
             Mat blob = getBlob(node_proto, constBlobs, 1);
             CV_Assert_N(blob.type() == CV_32F, blob.total());
-            divide(1.0, blob, blob);
             if (blob.total() == 1)
             {
-                layerParams.set("scale", blob.at<float>(0));
+                layerParams.set("scale", 1.0f / blob.at<float>(0));
                 layerParams.type = "Power";
             }
             else
             {
                 layerParams.type = "Scale";
+                divide(1.0, blob, blob);
                 layerParams.blobs.push_back(blob);
                 layerParams.set("bias_term", false);
             }
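The rewrite above rests on two identities: a - b = a + (-b), so a scalar Sub becomes a Power layer with shift = -b, and a / b = a * (1/b), so a scalar Div becomes a Power layer with scale = 1/b. The element-wise cases negate (or invert) the weights of a Scale layer instead, and Div now only inverts the blob in the branch that actually uses it. A small self-contained check of the same identities (not part of this commit):

    #include <opencv2/core.hpp>
    #include <cassert>

    int main()
    {
        float a = 5.f, b = 2.f;
        assert(a - b == a + (-b));        // Sub as Power with shift = -b
        assert(a / b == a * (1.0f / b));  // Div as Power with scale = 1/b

        cv::Mat blob = (cv::Mat_<float>(1, 3) << 2.f, 4.f, 8.f);
        cv::divide(1.0, blob, blob);      // element-wise reciprocal, as in the Div branch
        assert(blob.at<float>(0, 1) == 0.25f);
        return 0;
    }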
@@ -26,10 +26,11 @@
 #define INF_ENGINE_RELEASE_2018R2 2018020000
 #define INF_ENGINE_RELEASE_2018R3 2018030000
 #define INF_ENGINE_RELEASE_2018R4 2018040000
+#define INF_ENGINE_RELEASE_2018R5 2018050000

 #ifndef INF_ENGINE_RELEASE
-#warning("IE version have not been provided via command-line. Using 2018R4 by default")
-#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R4
+#warning("IE version have not been provided via command-line. Using 2018R5 by default")
+#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R5
 #endif

 #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
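For reference, INF_ENGINE_RELEASE packs the version as YYYYAABBCC (e.g. 2018R2.0.2 -> 2018020002); dividing by 10000 drops the BBCC patch fields so the *_MAJOR_* macros compare only the year/major pair. A worked expansion (not part of this commit), using the companion INF_ENGINE_VER_MAJOR_GE macro that appears elsewhere in this patch:

    // With INF_ENGINE_RELEASE == 2018050000 (2018R5):
    //   INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R4)
    //     -> (2018050000 / 10000) > (2018040000 / 10000)
    //     -> 201805 > 201804 -> true
    #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
    // ... code paths that require 2018R5 or newer ...
    #endif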
@@ -129,13 +129,15 @@ struct TorchImporter
     Module *rootModule;
     Module *curModule;
     int moduleCounter;
+    bool testPhase;

-    TorchImporter(String filename, bool isBinary)
+    TorchImporter(String filename, bool isBinary, bool evaluate)
     {
         CV_TRACE_FUNCTION();

         rootModule = curModule = NULL;
         moduleCounter = 0;
+        testPhase = evaluate;

         file = cv::Ptr<THFile>(THDiskFile_new(filename, "r", 0), THFile_free);
         CV_Assert(file && THFile_isOpened(file));
@@ -680,7 +682,8 @@ struct TorchImporter
                     layerParams.blobs.push_back(tensorParams["bias"].second);
                 }

-                if (nnName == "InstanceNormalization")
+                bool trainPhase = scalarParams.get<bool>("train", false);
+                if (nnName == "InstanceNormalization" || (trainPhase && !testPhase))
                 {
                     cv::Ptr<Module> mvnModule(new Module(nnName));
                     mvnModule->apiType = "MVN";
@@ -1243,18 +1246,18 @@ struct TorchImporter

 Mat readTorchBlob(const String &filename, bool isBinary)
 {
-    TorchImporter importer(filename, isBinary);
+    TorchImporter importer(filename, isBinary, true);
     importer.readObject();
     CV_Assert(importer.tensors.size() == 1);

     return importer.tensors.begin()->second;
 }

-Net readNetFromTorch(const String &model, bool isBinary)
+Net readNetFromTorch(const String &model, bool isBinary, bool evaluate)
 {
     CV_TRACE_FUNCTION();

-    TorchImporter importer(model, isBinary);
+    TorchImporter importer(model, isBinary, evaluate);
     Net net;
     importer.populateNet(net);
     return net;
@@ -226,9 +226,9 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
 TEST_P(DNNTestNetwork, OpenFace)
 {
 #if defined(INF_ENGINE_RELEASE)
-#if INF_ENGINE_RELEASE < 2018030000
+#if (INF_ENGINE_RELEASE < 2018030000 || INF_ENGINE_RELEASE == 2018050000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
+        throw SkipTestException("");
 #elif INF_ENGINE_RELEASE < 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
         throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
@@ -190,6 +190,14 @@ TEST_P(DNNTestOpenVINO, models)
             modelName == "landmarks-regression-retail-0009" ||
             modelName == "semantic-segmentation-adas-0001")))
         throw SkipTestException("");
+#elif INF_ENGINE_RELEASE == 2018050000
+    if (modelName == "single-image-super-resolution-0063" ||
+        modelName == "single-image-super-resolution-1011" ||
+        modelName == "single-image-super-resolution-1021" ||
+        (target == DNN_TARGET_OPENCL_FP16 && modelName == "face-reidentification-retail-0095") ||
+        (target == DNN_TARGET_MYRIAD && (modelName == "license-plate-recognition-barrier-0001" ||
+                                         modelName == "semantic-segmentation-adas-0001")))
+        throw SkipTestException("");
 #endif
 #endif

@@ -295,6 +295,10 @@ TEST_P(Test_Caffe_layers, Eltwise)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
+        throw SkipTestException("Test is disabled for OpenVINO 2018R5");
+#endif
     testLayerUsingCaffeModels("layer_eltwise");
 }

@@ -164,6 +164,8 @@ TEST_P(Test_ONNX_layers, MultyInputs)

 TEST_P(Test_ONNX_layers, DynamicReshape)
 {
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        throw SkipTestException("");
     testONNXModels("dynamic_reshape");
 }

@@ -249,6 +251,10 @@ TEST_P(Test_ONNX_nets, VGG16)
     else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL) {
         lInf = 1.2e-4;
     }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
+        l1 = 0.131;
+#endif
     testONNXModels("vgg16", pb, l1, lInf);
 }

@@ -327,7 +333,7 @@ TEST_P(Test_ONNX_nets, CNN_MNIST)
 TEST_P(Test_ONNX_nets, MobileNet_v2)
 {
     // output range: [-166; 317]
-    const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.38 : 7e-5;
+    const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.4 : 7e-5;
     const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2.87 : 5e-4;
     testONNXModels("mobilenetv2", pb, l1, lInf);
 }
@@ -350,7 +356,17 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)

 TEST_P(Test_ONNX_nets, Emotion_ferplus)
 {
-    testONNXModels("emotion_ferplus", pb);
+    double l1 = default_l1;
+    double lInf = default_lInf;
+    // Output values are in range [-2.01109, 2.11111]
+    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
+        l1 = 0.007;
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
+    {
+        l1 = 0.021;
+        lInf = 0.034;
+    }
+    testONNXModels("emotion_ferplus", pb, l1, lInf);
 }

 TEST_P(Test_ONNX_nets, Inception_v2)
@@ -371,6 +387,10 @@ TEST_P(Test_ONNX_nets, DenseNet121)

 TEST_P(Test_ONNX_nets, Inception_v1)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     testONNXModels("inception_v1", pb);
 }

@@ -241,6 +241,10 @@ TEST_P(Test_TensorFlow_layers, unfused_flatten)

 TEST_P(Test_TensorFlow_layers, leaky_relu)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
+        throw SkipTestException("");
+#endif
     runTensorFlowNet("leaky_relu_order1");
     runTensorFlowNet("leaky_relu_order2");
     runTensorFlowNet("leaky_relu_order3");
@@ -383,6 +387,10 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)

 TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        throw SkipTestException("Unstable test case");
+#endif
     checkBackend();
     std::string proto = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt", false);
     std::string model = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pb", false);
@@ -560,6 +568,10 @@ TEST_P(Test_TensorFlow_layers, slice)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
         (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     runTensorFlowNet("slice_4d");
 }

@@ -73,7 +73,7 @@ class Test_Torch_layers : public DNNTestLayer
 {
 public:
     void runTorchNet(const String& prefix, String outLayerName = "",
-                     bool check2ndBlob = false, bool isBinary = false,
+                     bool check2ndBlob = false, bool isBinary = false, bool evaluate = true,
                      double l1 = 0.0, double lInf = 0.0)
     {
         String suffix = (isBinary) ? ".dat" : ".txt";
@@ -84,7 +84,7 @@ public:

         checkBackend(backend, target, &inp, &outRef);

-        Net net = readNetFromTorch(_tf(prefix + "_net" + suffix), isBinary);
+        Net net = readNetFromTorch(_tf(prefix + "_net" + suffix), isBinary, evaluate);
         ASSERT_FALSE(net.empty());

         net.setPreferableBackend(backend);
@@ -114,7 +114,7 @@ TEST_P(Test_Torch_layers, run_convolution)
     // Output reference values are in range [23.4018, 72.0181]
     double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.08 : default_l1;
     double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.42 : default_lInf;
-    runTorchNet("net_conv", "", false, true, l1, lInf);
+    runTorchNet("net_conv", "", false, true, true, l1, lInf);
 }

 TEST_P(Test_Torch_layers, run_pool_max)
@@ -147,7 +147,7 @@ TEST_P(Test_Torch_layers, run_reshape)
 TEST_P(Test_Torch_layers, run_reshape_single_sample)
 {
     // Reference output values in range [14.4586, 18.4492].
-    runTorchNet("net_reshape_single_sample", "", false, false,
+    runTorchNet("net_reshape_single_sample", "", false, false, true,
                 (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.0073 : default_l1,
                 (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.025 : default_lInf);
 }
@@ -166,7 +166,7 @@ TEST_P(Test_Torch_layers, run_concat)

 TEST_P(Test_Torch_layers, run_depth_concat)
 {
-    runTorchNet("net_depth_concat", "", false, true, 0.0,
+    runTorchNet("net_depth_concat", "", false, true, true, 0.0,
                 target == DNN_TARGET_OPENCL_FP16 ? 0.021 : 0.0);
 }

@@ -182,6 +182,7 @@ TEST_P(Test_Torch_layers, run_deconv)
 TEST_P(Test_Torch_layers, run_batch_norm)
 {
     runTorchNet("net_batch_norm", "", false, true);
+    runTorchNet("net_batch_norm_train", "", false, true, false);
 }

 TEST_P(Test_Torch_layers, net_prelu)
@@ -216,7 +217,7 @@ TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("");
-    runTorchNet("net_conv_gemm_lrn", "", false, true,
+    runTorchNet("net_conv_gemm_lrn", "", false, true, true,
                 target == DNN_TARGET_OPENCL_FP16 ? 0.046 : 0.0,
                 target == DNN_TARGET_OPENCL_FP16 ? 0.023 : 0.0);
 }
@@ -266,9 +267,9 @@ class Test_Torch_nets : public DNNTestLayer {};

 TEST_P(Test_Torch_nets, OpenFace_accuracy)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
+#if defined(INF_ENGINE_RELEASE) && (INF_ENGINE_RELEASE < 2018030000 || INF_ENGINE_RELEASE == 2018050000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
+        throw SkipTestException("");
 #endif
     checkBackend();
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
@@ -389,6 +390,10 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
 // -model models/instance_norm/feathers.t7
 TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     checkBackend();
     std::string models[] = {"dnn/fast_neural_style_eccv16_starry_night.t7",
                             "dnn/fast_neural_style_instance_norm_feathers.t7"};
@@ -77,7 +77,7 @@ void KeyPointsFilter::retainBest(std::vector<KeyPoint>& keypoints, int n_points)
         return;
     }
     //first use nth element to partition the keypoints into the best and worst.
-    std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
+    std::nth_element(keypoints.begin(), keypoints.begin() + n_points - 1, keypoints.end(), KeypointResponseGreater());
     //this is the boundary response, and in the case of FAST may be ambiguous
     float ambiguous_response = keypoints[n_points - 1].response;
     //use std::partition to grab all of the keypoints with the boundary response.
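The one-line fix above matters because std::nth_element(first, nth, last, comp) only guarantees that *nth ends up at its fully-sorted position. With the pivot at begin() + n_points, keypoints[n_points - 1] (read on the next line as the boundary response) is merely some element not smaller than the pivot, and ties break differently across STL implementations; with begin() + n_points - 1 it is exactly the n_points-th best response. A minimal demonstration (not part of this commit):

    #include <algorithm>
    #include <cassert>
    #include <functional>
    #include <vector>

    int main()
    {
        // Ordering from the regression test below: a 4-way tie at the cut-off.
        std::vector<int> v = { 3, 3, 3, 3, 4, 5, 0, 1, 2 };
        const int n = 3;  // keep the 3 best

        std::nth_element(v.begin(), v.begin() + n - 1, v.end(), std::greater<int>());
        // v[n-1] is now guaranteed to be the 3rd-largest value (3),
        // so the boundary response is well-defined regardless of the STL.
        assert(v[n - 1] == 3);
        return 0;
    }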
modules/features2d/test/test_utils.cpp (new file, 38 lines)
@@ -0,0 +1,38 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+#include "test_precomp.hpp"
+
+namespace opencv_test { namespace {
+
+TEST(Features2D_KeypointUtils, retainBest_issue_12594)
+{
+    const size_t N = 9;
+
+    // Construct 4-way tie for 3rd highest - correct answer for "3 best" is 6
+    const float no_problem[] = { 5.0f, 4.0f, 1.0f, 2.0f, 0.0f, 3.0f, 3.0f, 3.0f, 3.0f };
+
+    // Same set, different order that exposes partial sort property of std::nth_element
+    // Note: the problem case may depend on your particular implementation of STL
+    const float problem[] = { 3.0f, 3.0f, 3.0f, 3.0f, 4.0f, 5.0f, 0.0f, 1.0f, 2.0f };
+
+    const size_t NBEST = 3u;
+    const size_t ANSWER = 6u;
+
+    std::vector<cv::KeyPoint> sorted_cv(N);
+    std::vector<cv::KeyPoint> unsorted_cv(N);
+
+    for (size_t i = 0; i < N; ++i)
+    {
+        sorted_cv[i].response = no_problem[i];
+        unsorted_cv[i].response = problem[i];
+    }
+
+    cv::KeyPointsFilter::retainBest(sorted_cv, NBEST);
+    cv::KeyPointsFilter::retainBest(unsorted_cv, NBEST);
+
+    EXPECT_EQ(ANSWER, sorted_cv.size());
+    EXPECT_EQ(ANSWER, unsorted_cv.size());
+}
+
+}} // namespace
[File diff suppressed because it is too large]
@@ -282,10 +282,10 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
                     for ( ; luc[c][k] < j+r+1; ++luc[c][k] )
                     {
 #if CV_SIMD256
-                        v_fine += v256_load(px + 16 * MIN(luc[c][k], n - 1)) - v256_load(px + 16 * MAX(luc[c][k] - 2 * r - 1, 0));
+                        v_fine = v_fine + v256_load(px + 16 * MIN(luc[c][k], n - 1)) - v256_load(px + 16 * MAX(luc[c][k] - 2 * r - 1, 0));
 #elif CV_SIMD128
-                        v_finel += v_load(px + 16 * MIN(luc[c][k], n - 1)    ) - v_load(px + 16 * MAX(luc[c][k] - 2 * r - 1, 0));
-                        v_fineh += v_load(px + 16 * MIN(luc[c][k], n - 1) + 8) - v_load(px + 16 * MAX(luc[c][k] - 2 * r - 1, 0) + 8);
+                        v_finel = v_finel + v_load(px + 16 * MIN(luc[c][k], n - 1)    ) - v_load(px + 16 * MAX(luc[c][k] - 2 * r - 1, 0));
+                        v_fineh = v_fineh + v_load(px + 16 * MIN(luc[c][k], n - 1) + 8) - v_load(px + 16 * MAX(luc[c][k] - 2 * r - 1, 0) + 8);
 #else
                         for (int ind = 0; ind < 16; ++ind)
                             H[c].fine[k][ind] += px[16 * MIN(luc[c][k], n - 1) + ind] - px[16 * MAX(luc[c][k] - 2 * r - 1, 0) + ind];
@@ -321,10 +321,10 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
                     CV_Assert( b < 16 );
                 }
             }
+#if CV_SIMD
+            vx_cleanup();
+#endif
         }
-#if CV_SIMD
-        vx_cleanup();
-#endif
     }

 #undef HOP
@@ -2200,4 +2200,15 @@ TEST(Imgproc_Filter2D, dftFilter2d_regression_10683)

     EXPECT_LE(cvtest::norm(dst, expected, NORM_INF), 2);
 }
+
+TEST(Imgproc_MedianBlur, hires_regression_13409)
+{
+    Mat src(2048, 2048, CV_8UC1), dst_hires, dst_ref;
+    randu(src, 0, 256);
+
+    medianBlur(src, dst_hires, 9);
+    medianBlur(src(Rect(512, 512, 1024, 1024)), dst_ref, 9);
+
+    ASSERT_EQ(0.0, cvtest::norm(dst_hires(Rect(516, 516, 1016, 1016)), dst_ref(Rect(4, 4, 1016, 1016)), NORM_INF));
+}
 }} // namespace
@@ -18,6 +18,13 @@ set(depends gen_opencv_java_source "${OPENCV_DEPHELPER}/gen_opencv_java_source")
 ocv_copyfiles_add_target(${the_module}_jar_source_copy JAVA_SRC_COPY "Copy Java(JAR) source files" ${depends})
 set(depends ${the_module}_jar_source_copy "${OPENCV_DEPHELPER}/${the_module}_jar_source_copy")

+if(OPENCV_JAVA_SOURCE_VERSION)
+  set(OPENCV_ANT_JAVAC_EXTRA_ATTRS "${OPENCV_ANT_JAVAC_EXTRA_ATTRS} source=\"${OPENCV_JAVA_SOURCE_VERSION}\"")
+endif()
+if(OPENCV_JAVA_TARGET_VERSION)
+  set(OPENCV_ANT_JAVAC_EXTRA_ATTRS "${OPENCV_ANT_JAVAC_EXTRA_ATTRS} target=\"${OPENCV_JAVA_TARGET_VERSION}\"")
+endif()
+
 configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.xml.in" "${OPENCV_JAVA_DIR}/build.xml" @ONLY)
 list(APPEND depends "${OPENCV_JAVA_DIR}/build.xml")
@@ -11,7 +11,7 @@
   <!-- This is to make a jar with a source attachment, for e.g. easy -->
   <!-- navigation in Eclipse. See this question: -->
   <!-- http://stackoverflow.com/questions/3584968/ant-how-to-compile-jar-that-includes-source-attachment -->
-  <javac sourcepath="" srcdir="java" destdir="build/classes" debug="on" includeantruntime="false" >
+  <javac sourcepath="" srcdir="java" destdir="build/classes" debug="on" includeantruntime="false" @OPENCV_ANT_JAVAC_EXTRA_ATTRS@ >
     <include name="**/*.java"/>
     <compilerarg line="-encoding utf-8"/>
   </javac>
@@ -782,6 +782,9 @@ bool QRCodeDetector::detect(InputArray in, OutputArray points) const
     Mat inarr = in.getMat();
     CV_Assert(!inarr.empty());
+    CV_Assert(inarr.depth() == CV_8U);
+    if (inarr.cols <= 20 || inarr.rows <= 20)
+        return false;  // image data is not enough for providing reliable results

     int incn = inarr.channels();
     if( incn == 3 || incn == 4 )
     {
@@ -1054,6 +1057,8 @@ std::string QRCodeDetector::decode(InputArray in, InputArray points,
     Mat inarr = in.getMat();
     CV_Assert(!inarr.empty());
     CV_Assert(inarr.depth() == CV_8U);
+    if (inarr.cols <= 20 || inarr.rows <= 20)
+        return cv::String();  // image data is not enough for providing reliable results

     int incn = inarr.channels();
     if( incn == 3 || incn == 4 )
@@ -1092,6 +1097,8 @@ std::string QRCodeDetector::detectAndDecode(InputArray in,
     Mat inarr = in.getMat();
     CV_Assert(!inarr.empty());
     CV_Assert(inarr.depth() == CV_8U);
+    if (inarr.cols <= 20 || inarr.rows <= 20)
+        return cv::String();  // image data is not enough for providing reliable results

     int incn = inarr.channels();
     if( incn == 3 || incn == 4 )
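These guards reject inputs before any detection work: non-8-bit images now fail the assertion, and images of 20 pixels or less per side return an empty result instead of attempting an unreliable decode. A caller-side sketch (not part of this commit; the file name is a placeholder and the 3.4-era API is assumed):

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/objdetect.hpp>
    #include <iostream>

    int main()
    {
        cv::Mat img = cv::imread("qr.png", cv::IMREAD_GRAYSCALE);  // placeholder path
        cv::QRCodeDetector detector;
        std::vector<cv::Point> points;
        if (detector.detect(img, points))  // now simply false for tiny inputs
            std::cout << detector.decode(img, points) << std::endl;
        return 0;
    }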
@@ -237,6 +237,11 @@ make & enjoy!
 #include <sys/videoio.h>
 #endif

+// https://github.com/opencv/opencv/issues/13335
+#ifndef V4L2_CID_ISO_SENSITIVITY
+#define V4L2_CID_ISO_SENSITIVITY (V4L2_CID_CAMERA_CLASS_BASE+23)
+#endif
+
 /* Defaults - If your board can do better, set it here. Set for the most common type inputs. */
 #define DEFAULT_V4L_WIDTH 640
 #define DEFAULT_V4L_HEIGHT 480
@@ -1757,7 +1762,7 @@ bool CvCaptureCAM_V4L::icvSetFrameSize(int _width, int _height)
     if (_width > 0)
         width_set = _width;

-    if (height > 0)
+    if (_height > 0)
         height_set = _height;

     /* two subsequent calls setting WIDTH and HEIGHT will change
@@ -11,7 +11,7 @@

 namespace opencv_test { namespace {

-static void test_readFrames(/*const*/ VideoCapture& capture, const int N = 100)
+static void test_readFrames(/*const*/ VideoCapture& capture, const int N = 100, Mat* lastFrame = NULL)
 {
     Mat frame;
     int64 time0 = cv::getTickCount();
@@ -26,6 +26,7 @@ static void test_readFrames(/*const*/ VideoCapture& capture, const int N = 100)
     }
     int64 time1 = cv::getTickCount();
     printf("Processed %d frames on %.2f FPS\n", N, (N * cv::getTickFrequency()) / (time1 - time0 + 1));
+    if (lastFrame) *lastFrame = frame.clone();
 }

 TEST(DISABLED_VideoIO_Camera, basic)
@@ -69,4 +70,39 @@ TEST(DISABLED_VideoIO_Camera, dshow_avermedia_capture)
     capture.release();
 }

+TEST(DISABLED_VideoIO_Camera, validate_V4L2_FrameSize)
+{
+    VideoCapture capture(CAP_V4L2);
+    ASSERT_TRUE(capture.isOpened());
+    std::cout << "Camera 0 via " << capture.getBackendName() << " backend" << std::endl;
+    std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
+    std::cout << "     height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
+    std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
+    int fourcc = (int)capture.get(CAP_PROP_FOURCC);
+    std::cout << "FOURCC code: " << cv::format("0x%8x", fourcc) << std::endl;
+    test_readFrames(capture, 30);
+
+    EXPECT_TRUE(capture.set(CAP_PROP_FRAME_WIDTH, 640));
+    EXPECT_TRUE(capture.set(CAP_PROP_FRAME_HEIGHT, 480));
+    std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
+    std::cout << "     height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
+    std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
+    Mat frame640x480;
+    test_readFrames(capture, 30, &frame640x480);
+    EXPECT_EQ(640, frame640x480.cols);
+    EXPECT_EQ(480, frame640x480.rows);
+
+    EXPECT_TRUE(capture.set(CAP_PROP_FRAME_WIDTH, 1280));
+    EXPECT_TRUE(capture.set(CAP_PROP_FRAME_HEIGHT, 720));
+    std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
+    std::cout << "     height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
+    std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
+    Mat frame1280x720;
+    test_readFrames(capture, 30, &frame1280x720);
+    EXPECT_EQ(1280, frame1280x720.cols);
+    EXPECT_EQ(720, frame1280x720.rows);
+
+    capture.release();
+}
+
 }} // namespace
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <manifest xmlns:android="http://schemas.android.com/apk/res/android"
           package="org.opencv.engine"
-          android:versionCode="344@ANDROID_PLATFORM_ID@"
-          android:versionName="3.44">
+          android:versionCode="345@ANDROID_PLATFORM_ID@"
+          android:versionName="3.45">

     <uses-sdk android:minSdkVersion="@ANDROID_NATIVE_API_LEVEL@" android:targetSdkVersion="22"/>
     <uses-feature android:name="android.hardware.touchscreen" android:required="false"/>
@@ -137,7 +137,7 @@ public class OpenCVEngineService extends Service {

     @Override
     public int getEngineVersion() throws RemoteException {
-        int version = 3440;
+        int version = 3450;
         try {
             version = getPackageManager().getPackageInfo(getPackageName(), 0).versionCode;
         } catch (NameNotFoundException e) {
@@ -12,7 +12,7 @@ manually using adb tool:

     adb install <path-to-OpenCV-sdk>/apk/OpenCV_<version>_Manager_<app_version>_<platform>.apk

-Example: OpenCV_3.4.4-dev_Manager_3.44_armeabi-v7a.apk
+Example: OpenCV_3.4.5-dev_Manager_3.45_armeabi-v7a.apk

 Use the list of platforms below to determine proper OpenCV Manager package for your device: