Merge pull request #12354 from alalek:samples_find_file

Alexander Alekhin, 2018-11-16 22:40:49 +03:00 (committed by GitHub)
commit 1d5a528107
145 changed files with 1296 additions and 503 deletions

View File

@ -82,3 +82,47 @@ ocv_add_accuracy_tests()
ocv_add_perf_tests()
ocv_install_3rdparty_licenses(SoftFloat "${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SoftFloat/COPYING.txt")
# generate data (samples data) config file
set(OPENCV_DATA_CONFIG_FILE "${CMAKE_BINARY_DIR}/opencv_data_config.hpp")
set(OPENCV_DATA_CONFIG_STR "")
if(CMAKE_INSTALL_PREFIX)
set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR}
#define OPENCV_INSTALL_PREFIX \"${CMAKE_INSTALL_PREFIX}\"
")
endif()
if(OPENCV_OTHER_INSTALL_PATH)
set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR}
#define OPENCV_DATA_INSTALL_PATH \"${OPENCV_OTHER_INSTALL_PATH}\"
")
endif()
set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR}
#define OPENCV_BUILD_DIR \"${CMAKE_BINARY_DIR}\"
")
file(RELATIVE_PATH SOURCE_DIR_RELATIVE ${CMAKE_BINARY_DIR} ${CMAKE_SOURCE_DIR})
set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR}
#define OPENCV_DATA_BUILD_DIR_SEARCH_PATHS \\
\"${SOURCE_DIR_RELATIVE}/\"
")
if(WIN32)
file(RELATIVE_PATH INSTALL_DATA_DIR_RELATIVE "${CMAKE_INSTALL_PREFIX}/${OPENCV_BIN_INSTALL_PATH}" "${CMAKE_INSTALL_PREFIX}/${OPENCV_OTHER_INSTALL_PATH}")
else()
file(RELATIVE_PATH INSTALL_DATA_DIR_RELATIVE "${CMAKE_INSTALL_PREFIX}/${OPENCV_LIB_INSTALL_PATH}" "${CMAKE_INSTALL_PREFIX}/${OPENCV_OTHER_INSTALL_PATH}")
endif()
list(APPEND OPENCV_INSTALL_DATA_DIR_RELATIVE "${INSTALL_DATA_DIR_RELATIVE}")
string(REPLACE ";" "\",\\\n \"" OPENCV_INSTALL_DATA_DIR_RELATIVE_STR "\"${OPENCV_INSTALL_DATA_DIR_RELATIVE}\"")
set(OPENCV_DATA_CONFIG_STR "${OPENCV_DATA_CONFIG_STR}
#define OPENCV_INSTALL_DATA_DIR_RELATIVE ${OPENCV_INSTALL_DATA_DIR_RELATIVE_STR}
")
if(EXISTS "${OPENCV_DATA_CONFIG_FILE}")
file(READ "${OPENCV_DATA_CONFIG_FILE}" __content)
endif()
if(NOT OPENCV_DATA_CONFIG_STR STREQUAL "${__content}")
file(WRITE "${OPENCV_DATA_CONFIG_FILE}" "${OPENCV_DATA_CONFIG_STR}")
endif()
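
To make the generated file concrete: a typical Linux configuration with the source tree at /home/user/opencv and the build tree at /home/user/opencv/build could produce an opencv_data_config.hpp roughly like the sketch below. Every value shown is an assumption that depends on the local build; it is included only to illustrate the macros consumed by the data-file lookup code later in this PR.

// Illustrative sketch of a generated opencv_data_config.hpp (Linux, non-Windows branch).
// All concrete values below are assumptions for a hypothetical build tree.
#define OPENCV_INSTALL_PREFIX "/usr/local"           // from CMAKE_INSTALL_PREFIX
#define OPENCV_DATA_INSTALL_PATH "share/OpenCV"      // from OPENCV_OTHER_INSTALL_PATH
#define OPENCV_BUILD_DIR "/home/user/opencv/build"   // from CMAKE_BINARY_DIR
// source directory relative to the build directory (SOURCE_DIR_RELATIVE)
#define OPENCV_DATA_BUILD_DIR_SEARCH_PATHS \
    "../"
// data directory relative to the library install directory (non-Windows case)
#define OPENCV_INSTALL_DATA_DIR_RELATIVE \
    "../share/OpenCV"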

View File

@ -75,6 +75,7 @@
@defgroup core_utils_sse SSE utilities
@defgroup core_utils_neon NEON utilities
@defgroup core_utils_softfloat Softfloat support
@defgroup core_utils_samples Utility functions for OpenCV samples
@}
@defgroup core_opengl OpenGL interoperability
@defgroup core_ipp Intel IPP Asynchronous C/C++ Converters

View File

@ -801,6 +801,82 @@ CV_EXPORTS InstrNode* getCurrentNode();
#define CV_INSTRUMENT_REGION(); CV_INSTRUMENT_REGION_();
#endif
namespace cv {
namespace utils {
//! @addtogroup core_utils
//! @{
/** @brief Try to find the requested data file
Search order:
1. Directories passed via `addDataSearchPath()`
2. The path specified by the configuration parameter with the "_HINT" suffix (name of an environment variable).
3. The path specified by the configuration parameter itself (name of an environment variable).
If the parameter value is not empty and nothing is found there, the search stops.
4. The build/install path detected from:
a. the current working directory (CWD)
b. and/or the binary module location (opencv_core/opencv_world; doesn't work with static linkage)
5. `<source>/{,data}` directories, if a build directory is detected or the current directory is inside the source tree.
6. The `<install>/share/OpenCV` directory, if an install directory is detected.
@param relative_path Relative path to the data file
@param required Specifies "file not found" handling.
If true, the function prints an information message and raises cv::Exception.
If false, the function returns an empty result.
@param configuration_parameter Name of the configuration parameter (environment variable). The default NULL value means "OPENCV_DATA_PATH".
@return Path to the file (absolute or relative to the current directory), or an empty string if the file is not found
@note The implementation is not thread-safe.
*/
CV_EXPORTS
cv::String findDataFile(const cv::String& relative_path, bool required = true,
const char* configuration_parameter = NULL);
/** @overload
@param relative_path Relative path to the data file
@param configuration_parameter Name of the configuration parameter (environment variable). The default NULL value means "OPENCV_DATA_PATH".
@param search_paths Overrides the addDataSearchPath() settings.
@param subdir_paths Overrides the addDataSearchSubDirectory() settings.
@return Path to the file (absolute or relative to the current directory), or an empty string if the file is not found
@note The implementation is not thread-safe.
*/
CV_EXPORTS
cv::String findDataFile(const cv::String& relative_path,
const char* configuration_parameter,
const std::vector<String>* search_paths,
const std::vector<String>* subdir_paths);
/** @brief Override the default data search path by adding a new search location
Use this only to override the default behavior.
Passed paths are used in LIFO order.
@param path Path to the data directory
@note The implementation is not thread-safe.
*/
CV_EXPORTS void addDataSearchPath(const cv::String& path);
/** @brief Append a default data search subdirectory
Typical usage is to add an OpenCV module name (`<opencv_contrib>/modules/<name>/data` -> `modules/<name>/data` + `<name>/data`).
Passed subdirectories are used in LIFO order.
@param subdir Data subdirectory to append
@note The implementation is not thread-safe.
*/
CV_EXPORTS void addDataSearchSubDirectory(const cv::String& subdir);
//! @}
} // namespace utils
} // namespace cv
//! @endcond
#endif // OPENCV_CORE_PRIVATE_HPP
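
As a rough usage sketch of the private API above: since the declarations live in the private core header, the snippet assumes it is compiled inside an OpenCV module build, and both the file name "mymodule_model.yml" and the parameter name "OPENCV_MYMODULE_DATA_PATH" are invented for illustration.

// Hypothetical sketch: resolving a module's data file via cv::utils::findDataFile.
// Assumes an OpenCV module build context (the private header refuses to compile outside it).
#include "opencv2/core/private.hpp"

static cv::String resolveModelPath()
{
    // Extend the subdirectory list; entries are tried in LIFO order.
    cv::utils::addDataSearchSubDirectory("mymodule/data");

    // required=false: return an empty string instead of throwing when nothing is found.
    return cv::utils::findDataFile("mymodule_model.yml",
                                   /*required=*/false,
                                   "OPENCV_MYMODULE_DATA_PATH");
}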

View File

@ -1274,8 +1274,75 @@ enum FLAGS
CV_EXPORTS void setFlags(FLAGS modeFlags);
static inline void setFlags(int modeFlags) { setFlags((FLAGS)modeFlags); }
CV_EXPORTS FLAGS getFlags();
} // namespace instr
namespace samples {
//! @addtogroup core_utils_samples
// This section describes utility functions for OpenCV samples.
//
// @note Implementation of these utilities is not thread-safe.
//
//! @{
/** @brief Try to find the requested data file
Search order:
1. Directories passed via `addSamplesDataSearchPath()`
2. The OPENCV_SAMPLES_DATA_PATH_HINT environment variable
3. The OPENCV_SAMPLES_DATA_PATH environment variable
If an environment variable value is not empty and nothing is found there, the search stops.
4. The build/install path detected from:
a. the current working directory (CWD)
b. and/or the binary module location (opencv_core/opencv_world; doesn't work with static linkage)
5. `<source>/{,data,samples/data}` directories, if a build directory is detected or the current directory is inside the source tree.
6. The `<install>/share/OpenCV` directory, if an install directory is detected.
@see cv::utils::findDataFile
@param relative_path Relative path to the data file
@param required Specifies "file not found" handling.
If true, the function prints an information message and raises cv::Exception.
If false, the function returns an empty result.
@param silentMode Disables messages
@return Path to the file (absolute or relative to the current directory), or an empty string if the file is not found
*/
CV_EXPORTS_W cv::String findFile(const cv::String& relative_path, bool required = true, bool silentMode = false);
CV_EXPORTS_W cv::String findFileOrKeep(const cv::String& relative_path, bool silentMode = false);
inline cv::String findFileOrKeep(const cv::String& relative_path, bool silentMode)
{
cv::String res = findFile(relative_path, false, silentMode);
if (res.empty())
return relative_path;
return res;
}
/** @brief Override the samples data search path by adding a new search location
Use this only to override the default behavior.
Passed paths are used in LIFO order.
@param path Path to the samples data directory
*/
CV_EXPORTS_W void addSamplesDataSearchPath(const cv::String& path);
/** @brief Append a samples data search subdirectory
Typical usage is to add an OpenCV module name (`<opencv_contrib>/modules/<name>/samples/data` -> `<name>/samples/data` + `modules/<name>/samples/data`).
Passed subdirectories are used in LIFO order.
@param subdir Samples data subdirectory to append
*/
CV_EXPORTS_W void addSamplesDataSearchSubDirectory(const cv::String& subdir);
//! @}
} // namespace samples
namespace utils {
CV_EXPORTS int getThreadID();
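
A minimal usage sketch of the new samples helpers declared above; the command-line handling and the extra search path are illustrative only, while "lena.jpg" is the stock samples image used in the tests of this PR.

// Minimal usage sketch for cv::samples::findFile / addSamplesDataSearchPath.
#include <iostream>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgcodecs.hpp>

int main(int argc, char** argv)
{
    if (argc > 1)
        cv::samples::addSamplesDataSearchPath(argv[1]); // extra location, tried first (LIFO)

    // Throws cv::Exception if required=true and the file cannot be located.
    cv::String path = cv::samples::findFile("lena.jpg", /*required=*/true);
    cv::Mat img = cv::imread(path, cv::IMREAD_COLOR);
    std::cout << "Loaded " << path.c_str() << " (" << img.cols << "x" << img.rows << ")" << std::endl;
    return img.empty() ? 1 : 0;
}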

View File

@ -16,6 +16,13 @@ CV_EXPORTS void remove_all(const cv::String& path);
CV_EXPORTS cv::String getcwd();
/** @brief Converts the given path to a canonical absolute path
 * Symlinks are resolved if the running platform supports them.
 *
 * @param path Input path. The target file/directory should exist.
*/
CV_EXPORTS cv::String canonical(const cv::String& path);
/** Join path components */
CV_EXPORTS cv::String join(const cv::String& base, const cv::String& path);
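
A small sketch of how these filesystem helpers combine; the relative "../data" location is only an assumed example layout.

// Sketch: normalizing a relative location with join() + canonical().
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/core/utils/filesystem.hpp>

int main()
{
    cv::String p = cv::utils::fs::join(cv::utils::fs::getcwd(), "../data"); // assumed layout
    // canonical() resolves ".." and symlinks when the target exists;
    // otherwise the input path is returned unchanged.
    std::cout << cv::utils::fs::canonical(p).c_str() << std::endl;
    return 0;
}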

View File

@ -0,0 +1,398 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp"
#include "opencv_data_config.hpp"
#include <vector>
#include <fstream>
#include <opencv2/core/utils/logger.defines.hpp>
#undef CV_LOG_STRIP_LEVEL
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_VERBOSE + 1
#include "opencv2/core/utils/logger.hpp"
#include "opencv2/core/utils/filesystem.hpp"
#include <opencv2/core/utils/configuration.private.hpp>
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef small
#undef min
#undef max
#undef abs
#elif defined(__APPLE__)
#include <TargetConditionals.h>
#if TARGET_OS_MAC
#include <dlfcn.h>
#endif
#endif
namespace cv { namespace utils {
static cv::Ptr< std::vector<cv::String> > g_data_search_path;
static cv::Ptr< std::vector<cv::String> > g_data_search_subdir;
static std::vector<cv::String>& _getDataSearchPath()
{
if (g_data_search_path.empty())
g_data_search_path.reset(new std::vector<cv::String>());
return *(g_data_search_path.get());
}
static std::vector<cv::String>& _getDataSearchSubDirectory()
{
if (g_data_search_subdir.empty())
{
g_data_search_subdir.reset(new std::vector<cv::String>());
g_data_search_subdir->push_back("data");
g_data_search_subdir->push_back("");
}
return *(g_data_search_subdir.get());
}
CV_EXPORTS void addDataSearchPath(const cv::String& path)
{
if (utils::fs::isDirectory(path))
_getDataSearchPath().push_back(path);
}
CV_EXPORTS void addDataSearchSubDirectory(const cv::String& subdir)
{
_getDataSearchSubDirectory().push_back(subdir);
}
static bool isPathSep(char c)
{
return c == '/' || c == '\\';
}
static bool isSubDirectory_(const cv::String& base_path, const cv::String& path)
{
size_t N = base_path.size();
if (N == 0)
return false;
if (isPathSep(base_path[N - 1]))
N--;
if (path.size() < N)
return false;
for (size_t i = 0; i < N; i++)
{
if (path[i] == base_path[i])
continue;
if (isPathSep(path[i]) && isPathSep(base_path[i]))
continue;
return false;
}
size_t M = path.size();
if (M > N)
{
if (!isPathSep(path[N]))
return false;
}
return true;
}
static bool isSubDirectory(const cv::String& base_path, const cv::String& path)
{
bool res = isSubDirectory_(base_path, path);
CV_LOG_VERBOSE(NULL, 0, "isSubDirectory(): base: " << base_path << " path: " << path << " => result: " << (res ? "TRUE" : "FALSE"));
return res;
}
static cv::String getModuleLocation(const void* addr)
{
CV_UNUSED(addr);
#ifdef _WIN32
HMODULE m = 0;
#if _WIN32_WINNT >= 0x0501
::GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCTSTR>(addr),
&m);
#endif
if (m)
{
char path[MAX_PATH];
const size_t path_size = sizeof(path)/sizeof(*path);
size_t sz = GetModuleFileNameA(m, path, path_size); // no unicode support
if (sz > 0 && sz < path_size)
{
path[sz] = '\0';
return cv::String(path);
}
}
#elif defined(__linux__)
std::ifstream fs("/proc/self/maps");
std::string line;
while (std::getline(fs, line, '\n'))
{
long long int addr_begin = 0, addr_end = 0;
if (2 == sscanf(line.c_str(), "%llx-%llx", &addr_begin, &addr_end))
{
if ((intptr_t)addr >= (intptr_t)addr_begin && (intptr_t)addr < (intptr_t)addr_end)
{
size_t pos = line.rfind("  "); // 2 spaces
if (pos == cv::String::npos)
pos = line.rfind(' '); // 1 space
else
pos++;
if (pos == cv::String::npos)
{
CV_LOG_DEBUG(NULL, "Can't parse module path: '" << line << '\'');
}
return line.substr(pos + 1);
}
}
}
#elif defined(__APPLE__)
# if TARGET_OS_MAC
Dl_info info;
if (0 != dladdr(addr, &info))
{
return cv::String(info.dli_fname);
}
# endif
#else
// not supported, skip
#endif
return cv::String();
}
cv::String findDataFile(const cv::String& relative_path,
const char* configuration_parameter,
const std::vector<String>* search_paths,
const std::vector<String>* subdir_paths)
{
configuration_parameter = configuration_parameter ? configuration_parameter : "OPENCV_DATA_PATH";
CV_LOG_DEBUG(NULL, cv::format("utils::findDataFile('%s', %s)", relative_path.c_str(), configuration_parameter));
#define TRY_FILE_WITH_PREFIX(prefix) \
{ \
cv::String path = utils::fs::join(prefix, relative_path); \
CV_LOG_DEBUG(NULL, cv::format("... Line %d: trying open '%s'", __LINE__, path.c_str())); \
FILE* f = fopen(path.c_str(), "rb"); \
if(f) { \
fclose(f); \
return path; \
} \
}
// Step 0: check the current directory or an absolute path first
TRY_FILE_WITH_PREFIX("");
// Step 1
const std::vector<cv::String>& search_path = search_paths ? *search_paths : _getDataSearchPath();
for(size_t i = search_path.size(); i > 0; i--)
{
const cv::String& prefix = search_path[i - 1];
TRY_FILE_WITH_PREFIX(prefix);
}
const std::vector<cv::String>& search_subdir = subdir_paths ? *subdir_paths : _getDataSearchSubDirectory();
// Step 2
const cv::String configuration_parameter_s(configuration_parameter ? configuration_parameter : "");
const cv::utils::Paths& search_hint = configuration_parameter_s.empty() ? cv::utils::Paths()
: getConfigurationParameterPaths((configuration_parameter_s + "_HINT").c_str());
for (size_t k = 0; k < search_hint.size(); k++)
{
cv::String datapath = search_hint[k];
if (datapath.empty())
continue;
if (utils::fs::isDirectory(datapath))
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): trying " << configuration_parameter << "_HINT=" << datapath);
for(size_t i = search_subdir.size(); i > 0; i--)
{
const cv::String& subdir = search_subdir[i - 1];
cv::String prefix = utils::fs::join(datapath, subdir);
TRY_FILE_WITH_PREFIX(prefix);
}
}
else
{
CV_LOG_WARNING(NULL, configuration_parameter << "_HINT is specified but it is not a directory: " << datapath);
}
}
// Step 3
const cv::utils::Paths& override_paths = configuration_parameter_s.empty() ? cv::utils::Paths()
: getConfigurationParameterPaths(configuration_parameter);
for (size_t k = 0; k < override_paths.size(); k++)
{
cv::String datapath = override_paths[k];
if (datapath.empty())
continue;
if (utils::fs::isDirectory(datapath))
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): trying " << configuration_parameter << "=" << datapath);
for(size_t i = search_subdir.size(); i > 0; i--)
{
const cv::String& subdir = search_subdir[i - 1];
cv::String prefix = utils::fs::join(datapath, subdir);
TRY_FILE_WITH_PREFIX(prefix);
}
}
else
{
CV_LOG_WARNING(NULL, configuration_parameter << " is specified but it is not a directory: " << datapath);
}
}
if (!override_paths.empty())
{
CV_LOG_INFO(NULL, "utils::findDataFile(): can't find data file via " << configuration_parameter << " configuration override: " << relative_path);
return cv::String();
}
// Steps: 4, 5, 6
cv::String cwd = utils::fs::getcwd();
cv::String build_dir(OPENCV_BUILD_DIR);
bool has_tested_build_directory = false;
if (isSubDirectory(build_dir, cwd) || isSubDirectory(utils::fs::canonical(build_dir), utils::fs::canonical(cwd)))
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): the current directory is build sub-directory: " << cwd);
const char* build_subdirs[] = { OPENCV_DATA_BUILD_DIR_SEARCH_PATHS };
for (size_t k = 0; k < sizeof(build_subdirs)/sizeof(build_subdirs[0]); k++)
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): <build>/" << build_subdirs[k]);
cv::String datapath = utils::fs::join(build_dir, build_subdirs[k]);
if (utils::fs::isDirectory(datapath))
{
for(size_t i = search_subdir.size(); i > 0; i--)
{
const cv::String& subdir = search_subdir[i - 1];
cv::String prefix = utils::fs::join(datapath, subdir);
TRY_FILE_WITH_PREFIX(prefix);
}
}
}
has_tested_build_directory = true;
}
cv::String source_dir;
cv::String try_source_dir = cwd;
for (int levels = 0; levels < 3; ++levels)
{
if (utils::fs::exists(utils::fs::join(try_source_dir, "modules/core/include/opencv2/core/version.hpp")))
{
source_dir = try_source_dir;
break;
}
try_source_dir = utils::fs::join(try_source_dir, "/..");
}
if (!source_dir.empty())
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): the current directory is source sub-directory: " << source_dir);
CV_LOG_DEBUG(NULL, "utils::findDataFile(): <source>" << source_dir);
cv::String datapath = source_dir;
if (utils::fs::isDirectory(datapath))
{
for(size_t i = search_subdir.size(); i > 0; i--)
{
const cv::String& subdir = search_subdir[i - 1];
cv::String prefix = utils::fs::join(datapath, subdir);
TRY_FILE_WITH_PREFIX(prefix);
}
}
}
cv::String module_path = getModuleLocation((void*)getModuleLocation); // use code addr, doesn't work with static linkage!
CV_LOG_DEBUG(NULL, "Detected module path: '" << module_path << '\'');
if (!has_tested_build_directory &&
(isSubDirectory(build_dir, module_path) || isSubDirectory(utils::fs::canonical(build_dir), utils::fs::canonical(module_path)))
)
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): the binary module directory is build sub-directory: " << module_path);
const char* build_subdirs[] = { OPENCV_DATA_BUILD_DIR_SEARCH_PATHS };
for (size_t k = 0; k < sizeof(build_subdirs)/sizeof(build_subdirs[0]); k++)
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): <build>/" << build_subdirs[k]);
cv::String datapath = utils::fs::join(build_dir, build_subdirs[k]);
if (utils::fs::isDirectory(datapath))
{
for(size_t i = search_subdir.size(); i > 0; i--)
{
const cv::String& subdir = search_subdir[i - 1];
cv::String prefix = utils::fs::join(datapath, subdir);
TRY_FILE_WITH_PREFIX(prefix);
}
}
}
}
#if defined OPENCV_INSTALL_DATA_DIR_RELATIVE
if (!module_path.empty()) // require module path
{
size_t pos = module_path.rfind('/');
if (pos == cv::String::npos)
pos = module_path.rfind('\\');
cv::String module_dir = (pos == cv::String::npos) ? module_path : module_path.substr(0, pos);
const char* install_subdirs[] = { OPENCV_INSTALL_DATA_DIR_RELATIVE };
for (size_t k = 0; k < sizeof(install_subdirs)/sizeof(install_subdirs[0]); k++)
{
cv::String datapath = utils::fs::join(module_dir, install_subdirs[k]);
CV_LOG_DEBUG(NULL, "utils::findDataFile(): trying install path (from binary path): " << datapath);
if (utils::fs::isDirectory(datapath))
{
for(size_t i = search_subdir.size(); i > 0; i--)
{
const cv::String& subdir = search_subdir[i - 1];
cv::String prefix = utils::fs::join(datapath, subdir);
TRY_FILE_WITH_PREFIX(prefix);
}
}
else
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): ... skip, not a valid directory: " << datapath);
}
}
}
#endif
#if defined OPENCV_INSTALL_PREFIX && defined OPENCV_DATA_INSTALL_PATH
cv::String install_dir(OPENCV_INSTALL_PREFIX);
// use the core/world module path and verify that the library is running from the installation directory
// This is necessary to avoid touching the unrelated common /usr/local path
if (module_path.empty()) // can't determine
module_path = install_dir;
if (isSubDirectory(install_dir, module_path) || isSubDirectory(utils::fs::canonical(install_dir), utils::fs::canonical(module_path)))
{
cv::String datapath = utils::fs::join(install_dir, OPENCV_DATA_INSTALL_PATH);
if (utils::fs::isDirectory(datapath))
{
CV_LOG_DEBUG(NULL, "utils::findDataFile(): trying install path: " << datapath);
for(size_t i = search_subdir.size(); i > 0; i--)
{
const cv::String& subdir = search_subdir[i - 1];
cv::String prefix = utils::fs::join(datapath, subdir);
TRY_FILE_WITH_PREFIX(prefix);
}
}
}
#endif
return cv::String(); // not found
}
cv::String findDataFile(const cv::String& relative_path, bool required, const char* configuration_parameter)
{
CV_LOG_DEBUG(NULL, cv::format("cv::utils::findDataFile('%s', %s, %s)",
relative_path.c_str(), required ? "true" : "false",
configuration_parameter ? configuration_parameter : "NULL"));
cv::String result = cv::utils::findDataFile(relative_path,
configuration_parameter,
NULL,
NULL);
if (result.empty() && required)
CV_Error(cv::Error::StsError, cv::format("OpenCV: Can't find required data file: %s", relative_path.c_str()));
return result;
}
}} // namespace
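
Stripped of logging and of the build/install detection, the core pattern used by the implementation above is "try the name as-is, then try it under each registered prefix in reverse (LIFO) order". The standalone sketch below illustrates only that idea; it is not the actual implementation, and the prefixes in main() are invented.

// Simplified illustration of the lookup pattern: try the path as given, then
// under each prefix in LIFO order. Not the real implementation, just the idea.
#include <cstdio>
#include <string>
#include <vector>

static bool fileExists(const std::string& path)
{
    if (FILE* f = std::fopen(path.c_str(), "rb")) { std::fclose(f); return true; }
    return false;
}

static std::string findUnderPrefixes(const std::string& relative_path,
                                     const std::vector<std::string>& prefixes)
{
    if (fileExists(relative_path))                 // Step 0: CWD or absolute path
        return relative_path;
    for (size_t i = prefixes.size(); i > 0; i--)   // later registrations win (LIFO)
    {
        std::string candidate = prefixes[i - 1] + "/" + relative_path;
        if (fileExists(candidate))
            return candidate;
    }
    return std::string();                          // not found
}

int main()
{
    std::vector<std::string> prefixes;
    prefixes.push_back("/usr/local/share/OpenCV"); // assumed install location
    prefixes.push_back("./samples/data");          // assumed source location, tried first
    std::string found = findUnderPrefixes("lena.jpg", prefixes);
    std::printf("%s\n", found.empty() ? "<not found>" : found.c_str());
    return 0;
}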

View File

@ -85,6 +85,20 @@ cv::String join(const cv::String& base, const cv::String& path)
#if OPENCV_HAVE_FILESYSTEM_SUPPORT
cv::String canonical(const cv::String& path)
{
#ifdef _WIN32
const char* result = _fullpath(NULL, path.c_str(), 0);
#else
const char* result = realpath(path.c_str(), NULL);
#endif
if (result)
return cv::String(result);
// no error handling, just return input
return path;
}
bool exists(const cv::String& path)
{
CV_INSTRUMENT_REGION();
@ -543,11 +557,12 @@ cv::String getCacheDirectory(const char* sub_directory_name, const char* configu
#else
#define NOT_IMPLEMENTED CV_Error(Error::StsNotImplemented, "");
CV_EXPORTS bool exists(const cv::String& /*path*/) { NOT_IMPLEMENTED }
CV_EXPORTS void remove_all(const cv::String& /*path*/) { NOT_IMPLEMENTED }
CV_EXPORTS bool createDirectory(const cv::String& /*path*/) { NOT_IMPLEMENTED }
CV_EXPORTS bool createDirectories(const cv::String& /*path*/) { NOT_IMPLEMENTED }
CV_EXPORTS cv::String getCacheDirectory(const char* /*sub_directory_name*/, const char* /*configuration_name = NULL*/) { NOT_IMPLEMENTED }
cv::String canonical(const cv::String& /*path*/) { NOT_IMPLEMENTED }
bool exists(const cv::String& /*path*/) { NOT_IMPLEMENTED }
void remove_all(const cv::String& /*path*/) { NOT_IMPLEMENTED }
bool createDirectory(const cv::String& /*path*/) { NOT_IMPLEMENTED }
bool createDirectories(const cv::String& /*path*/) { NOT_IMPLEMENTED }
cv::String getCacheDirectory(const char* /*sub_directory_name*/, const char* /*configuration_name = NULL*/) { NOT_IMPLEMENTED }
#undef NOT_IMPLEMENTED
#endif // OPENCV_HAVE_FILESYSTEM_SUPPORT

View File

@ -0,0 +1,67 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp"
#include <vector>
#include <opencv2/core/utils/logger.defines.hpp>
#undef CV_LOG_STRIP_LEVEL
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_VERBOSE + 1
#include "opencv2/core/utils/logger.hpp"
#include "opencv2/core/utils/filesystem.hpp"
namespace cv { namespace samples {
static cv::Ptr< std::vector<cv::String> > g_data_search_path;
static cv::Ptr< std::vector<cv::String> > g_data_search_subdir;
static std::vector<cv::String>& _getDataSearchPath()
{
if (g_data_search_path.empty())
g_data_search_path.reset(new std::vector<cv::String>());
return *(g_data_search_path.get());
}
static std::vector<cv::String>& _getDataSearchSubDirectory()
{
if (g_data_search_subdir.empty())
{
g_data_search_subdir.reset(new std::vector<cv::String>());
g_data_search_subdir->push_back("samples/data");
g_data_search_subdir->push_back("data");
g_data_search_subdir->push_back("");
}
return *(g_data_search_subdir.get());
}
CV_EXPORTS void addSamplesDataSearchPath(const cv::String& path)
{
if (utils::fs::isDirectory(path))
_getDataSearchPath().push_back(path);
}
CV_EXPORTS void addSamplesDataSearchSubDirectory(const cv::String& subdir)
{
_getDataSearchSubDirectory().push_back(subdir);
}
cv::String findFile(const cv::String& relative_path, bool required, bool silentMode)
{
CV_LOG_DEBUG(NULL, cv::format("cv::samples::findFile('%s', %s)", relative_path.c_str(), required ? "true" : "false"));
cv::String result = cv::utils::findDataFile(relative_path,
"OPENCV_SAMPLES_DATA_PATH",
&_getDataSearchPath(),
&_getDataSearchSubDirectory());
if (result != relative_path && !silentMode)
{
CV_LOG_WARNING(NULL, "cv::samples::findFile('" << relative_path << "') => '" << result << "'");
}
if (result.empty() && required)
CV_Error(cv::Error::StsError, cv::format("OpenCV samples: Can't find required data file: %s", relative_path.c_str()));
return result;
}
}} // namespace
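
To show how the OPENCV_SAMPLES_DATA_PATH_HINT step can be exercised from a program: the sketch below sets the hint in-process before the first lookup, which is roughly equivalent to exporting the variable in the shell before running a sample. The opencv_extra location is an assumption.

// Sketch: steering cv::samples::findFile through its environment variable.
#include <cstdlib>
#include <iostream>
#include <opencv2/core/utility.hpp>

int main()
{
#ifndef _WIN32
    setenv("OPENCV_SAMPLES_DATA_PATH_HINT", "/home/user/opencv_extra/testdata/cv", 1); // assumed path
#endif
    // With the hint set, relative names resolve inside that directory first;
    // otherwise the documented build/install fallbacks apply.
    cv::String p = cv::samples::findFile("cloning/Normal_Cloning/source1.png", /*required=*/false);
    std::cout << (p.empty() ? "<not found>" : p.c_str()) << std::endl;
    return 0;
}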

View File

@ -2,6 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include "opencv2/core/utils/logger.hpp"
namespace opencv_test { namespace {
@ -283,4 +284,21 @@ TEST(CommandLineParser, testScalar)
EXPECT_EQ(parser.get<Scalar>("s5"), Scalar(5, -4, 3, 2));
}
TEST(Samples, findFile)
{
cv::utils::logging::LogLevel prev = cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_VERBOSE);
cv::String path;
ASSERT_NO_THROW(path = samples::findFile("lena.jpg", false));
EXPECT_NE(std::string(), path.c_str());
cv::utils::logging::setLogLevel(prev);
}
TEST(Samples, findFile_missing)
{
cv::utils::logging::LogLevel prev = cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_VERBOSE);
cv::String path;
ASSERT_ANY_THROW(path = samples::findFile("non-existed.file", true));
cv::utils::logging::setLogLevel(prev);
}
}} // namespace

View File

@ -84,6 +84,23 @@ class Arguments(NewOpenCVTests):
self.assertEqual(res4, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=3 dims(-1)=1 size(-1)=3x1 type(0)=CV_32FC2 dims(0)=2 size(0)=3x1 type(0)=CV_32FC2")
class SamplesFindFile(NewOpenCVTests):
def test_ExistedFile(self):
res = cv.samples.findFile('lena.jpg', False)
self.assertNotEqual(res, '')
def test_MissingFile(self):
res = cv.samples.findFile('non_existed.file', False)
self.assertEqual(res, '')
def test_MissingFileException(self):
try:
res = cv.samples.findFile('non_existed.file', True)
self.assertEqual("Dead code", 0)
except cv.error as _e:
pass
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -41,7 +41,7 @@ int main(int argc, char** argv)
if (video.size() == 1 && isdigit(video[0]))
capture.open(parser.get<int>("@video"));
else
capture.open(video);
capture.open(samples::findFileOrKeep(video)); // keep GStreamer pipelines
int nframes = 0;
if (capture.isOpened())
{

View File

@ -38,7 +38,10 @@ int main(int argc, const char** argv)
if (file.empty())
cap.open(camera);
else
{
file = samples::findFileOrKeep(file); // ignore gstreamer pipelines
cap.open(file.c_str());
}
if (!cap.isOpened())
{
cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;

View File

@ -254,12 +254,31 @@ static bool readStringList( const string& filename, vector<string>& l )
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
size_t dir_pos = filename.rfind('/');
if (dir_pos == string::npos)
dir_pos = filename.rfind('\\');
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
{
string fname = (string)*it;
if (dir_pos != string::npos)
{
string fpath = samples::findFile(filename.substr(0, dir_pos + 1) + fname, false);
if (fpath.empty())
{
fpath = samples::findFile(fname);
}
fname = fpath;
}
else
{
fname = samples::findFile(fname);
}
l.push_back(fname);
}
return true;
}
@ -383,10 +402,10 @@ int main( int argc, char** argv )
if( !inputFilename.empty() )
{
if( !videofile && readStringList(inputFilename, imageList) )
if( !videofile && readStringList(samples::findFile(inputFilename), imageList) )
mode = CAPTURING;
else
capture.open(inputFilename);
capture.open(samples::findFileOrKeep(inputFilename));
}
else
capture.open(cameraId);

View File

@ -17,8 +17,7 @@
* The program takes as input a source and a destination image (for 1-3 methods)
* and outputs the cloned image.
*
* Download test images from opencv_extra folder @github.
*
* Download test images from opencv_extra repository.
*/
#include "opencv2/photo.hpp"
@ -27,7 +26,6 @@
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
#include <stdlib.h>
using namespace std;
using namespace cv;
@ -35,6 +33,7 @@ using namespace cv;
int main()
{
cout << endl;
cout << "Note: specify OPENCV_SAMPLES_DATA_PATH_HINT=<opencv_extra>/testdata/cv" << endl << endl;
cout << "Cloning Module" << endl;
cout << "---------------" << endl;
cout << "Options: " << endl;
@ -54,9 +53,9 @@ int main()
if(num == 1)
{
string folder = "cloning/Normal_Cloning/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "destination1.png";
string original_path3 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "destination1.png");
string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
@ -86,14 +85,14 @@ int main()
seamlessClone(source, destination, mask, p, result, 1);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 2)
{
string folder = "cloning/Mixed_Cloning/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "destination1.png";
string original_path3 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "destination1.png");
string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
@ -123,14 +122,14 @@ int main()
seamlessClone(source, destination, mask, p, result, 2);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 3)
{
string folder = "cloning/Monochrome_Transfer/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "destination1.png";
string original_path3 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "destination1.png");
string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
@ -160,13 +159,13 @@ int main()
seamlessClone(source, destination, mask, p, result, 3);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 4)
{
string folder = "cloning/Color_Change/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "mask.png";
string folder = "cloning/color_change/";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
@ -187,13 +186,13 @@ int main()
colorChange(source, mask, result, 1.5, .5, .5);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 5)
{
string folder = "cloning/Illumination_Change/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
@ -214,13 +213,13 @@ int main()
illuminationChange(source, mask, result, 0.2f, 0.4f);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 6)
{
string folder = "cloning/Texture_Flattening/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
@ -241,7 +240,12 @@ int main()
textureFlattening(source, mask, result, 30, 45, 3);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else
{
cerr << "Invalid selection: " << num << endl;
exit(1);
}
waitKey(0);
}

View File

@ -30,14 +30,12 @@
* Result: The cloned image will be displayed.
*/
#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
#include <stdlib.h>
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
using std::cin;
@ -320,9 +318,9 @@ int main()
cout << "Enter Destination Image: ";
cin >> dest;
img0 = imread(src);
img0 = imread(samples::findFile(src));
img2 = imread(dest);
img2 = imread(samples::findFile(dest));
if(img0.empty())
{
@ -370,7 +368,7 @@ int main()
cout << "Blue: ";
cin >> blue;
img0 = imread(src);
img0 = imread(samples::findFile(src));
if(img0.empty())
{
@ -400,7 +398,7 @@ int main()
cout << "beta: ";
cin >> beta;
img0 = imread(src);
img0 = imread(samples::findFile(src));
if(img0.empty())
{
@ -433,7 +431,7 @@ int main()
cout << "kernel_size: ";
cin >> kernel_size;
img0 = imread(src);
img0 = imread(samples::findFile(src));
if(img0.empty())
{

View File

@ -35,14 +35,14 @@ static void on_trackbar(int, void*)
int main( int argc, const char** argv )
{
CommandLineParser parser(argc, argv, "{@image|../data/stuff.jpg|image for converting to a grayscale}");
CommandLineParser parser(argc, argv, "{@image|stuff.jpg|image for converting to a grayscale}");
parser.about("\nThis program demonstrates connected components and use of the trackbar\n");
parser.printMessage();
cout << "\nThe image is converted to grayscale and displayed, another image has a trackbar\n"
"that controls thresholding and thereby the extracted contours which are drawn in color\n";
String inputImage = parser.get<string>(0);
img = imread(inputImage, IMREAD_GRAYSCALE);
img = imread(samples::findFile(inputImage), IMREAD_GRAYSCALE);
if(img.empty())
{

View File

@ -95,7 +95,7 @@ void mouseHandler(int event, int x, int y, int, void*)
int main(int argc, char **argv)
{
CommandLineParser parser(argc, argv, "{@input | ../data/lena.jpg | input image}");
CommandLineParser parser(argc, argv, "{@input | lena.jpg | input image}");
parser.about("This program demonstrates using mouse events\n");
parser.printMessage();
cout << "\n\tleft mouse button - set a point to create mask shape\n"
@ -103,13 +103,13 @@ int main(int argc, char **argv)
"\tmiddle mouse button - reset\n";
String input_image = parser.get<String>("@input");
src = imread(input_image);
src = imread(samples::findFile(input_image));
if (src.empty())
{
{
printf("Error opening image: %s\n", input_image.c_str());
return 0;
}
}
namedWindow("Source", WINDOW_AUTOSIZE);
setMouseCallback("Source", mouseHandler, NULL);

View File

@ -49,7 +49,7 @@ int main(int , char** )
return 1;
}
std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
std::string cascadeFrontalfilename = samples::findFile("data/lbpcascades/lbpcascade_frontalface.xml");
cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
if ( cascade->empty() )

View File

@ -59,12 +59,12 @@ static void updateBrightnessContrast( int /*arg*/, void* )
static void help()
{
std::cout << "\nThis program demonstrates the use of calcHist() -- histogram creation.\n"
<< "Usage: \n" << "demhist [image_name -- Defaults to ../data/baboon.jpg]" << std::endl;
<< "Usage: \n" << "demhist [image_name -- Defaults to baboon.jpg]" << std::endl;
}
const char* keys =
{
"{help h||}{@image|../data/baboon.jpg|input image file}"
"{help h||}{@image|baboon.jpg|input image file}"
};
int main( int argc, const char** argv )
@ -78,7 +78,7 @@ int main( int argc, const char** argv )
string inputImage = parser.get<string>(0);
// Load the source image. HighGUI use.
image = imread( inputImage, 0 );
image = imread(samples::findFile(inputImage), IMREAD_GRAYSCALE);
if(image.empty())
{
std::cerr << "Cannot read image file: " << inputImage << std::endl;

View File

@ -14,7 +14,7 @@ static void help()
{
cout << "\n This program demonstrates how to use BLOB to detect and filter region \n"
"Usage: \n"
" ./detect_blob <image1(../data/detect_blob.png as default)>\n"
" ./detect_blob <image1(detect_blob.png as default)>\n"
"Press a key when image window is active to change descriptor";
}
@ -70,20 +70,19 @@ static String Legende(SimpleBlobDetector::Params &pAct)
int main(int argc, char *argv[])
{
vector<String> fileName;
Mat img(600, 800, CV_8UC1);
cv::CommandLineParser parser(argc, argv, "{@input |../data/detect_blob.png| }{h help | | }");
String fileName;
cv::CommandLineParser parser(argc, argv, "{@input |detect_blob.png| }{h help | | }");
if (parser.has("h"))
{
help();
return 0;
}
fileName.push_back(parser.get<string>("@input"));
img = imread(fileName[0], IMREAD_COLOR);
if (img.rows*img.cols <= 0)
fileName = parser.get<string>("@input");
Mat img = imread(samples::findFile(fileName), IMREAD_COLOR);
if (img.empty())
{
cout << "Image " << fileName[0] << " is empty or cannot be found\n";
return(0);
cout << "Image " << fileName << " is empty or cannot be found\n";
return 1;
}
SimpleBlobDetector::Params pDefaultBLOB;
@ -116,14 +115,17 @@ int main(int argc, char *argv[])
vector< Vec3b > palette;
for (int i = 0; i<65536; i++)
{
palette.push_back(Vec3b((uchar)rand(), (uchar)rand(), (uchar)rand()));
uchar c1 = (uchar)rand();
uchar c2 = (uchar)rand();
uchar c3 = (uchar)rand();
palette.push_back(Vec3b(c1, c2, c3));
}
help();
// These descriptors are going to be detecting and computing BLOBS with 6 different params
// Param for first BLOB detector we want all
typeDesc.push_back("BLOB"); // see http://docs.opencv.org/trunk/d0/d7a/classcv_1_1SimpleBlobDetector.html
typeDesc.push_back("BLOB"); // see http://docs.opencv.org/3.4/d0/d7a/classcv_1_1SimpleBlobDetector.html
pBLOB.push_back(pDefaultBLOB);
pBLOB.back().filterByArea = true;
pBLOB.back().minArea = 1;
@ -150,7 +152,7 @@ int main(int argc, char *argv[])
pBLOB.back().filterByConvexity = true;
pBLOB.back().minConvexity = 0.;
pBLOB.back().maxConvexity = (float)0.9;
// Param for six BLOB detector we want blob with gravity center color equal to 0 bug #4321 must be fixed
// Param for six BLOB detector we want blob with gravity center color equal to 0
typeDesc.push_back("BLOB");
pBLOB.push_back(pDefaultBLOB);
pBLOB.back().filterByColor = true;

View File

@ -412,7 +412,7 @@ int main(int argc, char *argv[])
string input = parser.get<string>("@input");
if (!input.empty())
{
imgOrig = imread(input, IMREAD_GRAYSCALE);
imgOrig = imread(samples::findFile(input), IMREAD_GRAYSCALE);
blur(imgOrig, img, blurSize);
}
else

View File

@ -14,12 +14,12 @@ static void help()
printf("\nThis program demonstrated the use of the discrete Fourier transform (dft)\n"
"The dft of an image is taken and it's power spectrum is displayed.\n"
"Usage:\n"
"./dft [image_name -- default ../data/lena.jpg]\n");
"./dft [image_name -- default lena.jpg]\n");
}
const char* keys =
{
"{help h||}{@image|../data/lena.jpg|input image file}"
"{help h||}{@image|lena.jpg|input image file}"
};
int main(int argc, const char ** argv)
@ -32,7 +32,7 @@ int main(int argc, const char ** argv)
return 0;
}
string filename = parser.get<string>(0);
Mat img = imread(filename, IMREAD_GRAYSCALE);
Mat img = imread(samples::findFile(filename), IMREAD_GRAYSCALE);
if( img.empty() )
{
help();

View File

@ -91,7 +91,7 @@ static void help()
{
printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n"
"Usage:\n"
"./distrans [image_name -- default image is ../data/stuff.jpg]\n"
"./distrans [image_name -- default image is stuff.jpg]\n"
"\nHot keys: \n"
"\tESC - quit the program\n"
"\tC - use C/Inf metric\n"
@ -107,7 +107,7 @@ static void help()
const char* keys =
{
"{help h||}{@image |../data/stuff.jpg|input image file}"
"{help h||}{@image |stuff.jpg|input image file}"
};
int main( int argc, const char** argv )
@ -117,7 +117,7 @@ int main( int argc, const char** argv )
if (parser.has("help"))
return 0;
string filename = parser.get<string>(0);
gray = imread(filename, 0);
gray = imread(samples::findFile(filename), 0);
if(gray.empty())
{
printf("Cannot read image file: %s\n", filename.c_str());

View File

@ -43,12 +43,12 @@ static void help()
{
printf("\nThis sample demonstrates Canny edge detection\n"
"Call:\n"
" /.edge [image_name -- Default is ../data/fruits.jpg]\n\n");
" /.edge [image_name -- Default is fruits.jpg]\n\n");
}
const char* keys =
{
"{help h||}{@image |../data/fruits.jpg|input image name}"
"{help h||}{@image |fruits.jpg|input image name}"
};
int main( int argc, const char** argv )
@ -57,7 +57,7 @@ int main( int argc, const char** argv )
CommandLineParser parser(argc, argv, keys);
string filename = parser.get<string>(0);
image = imread(filename, IMREAD_COLOR);
image = imread(samples::findFile(filename), IMREAD_COLOR);
if(image.empty())
{
printf("Cannot read image file: %s\n", filename.c_str());

View File

@ -18,7 +18,7 @@ static void help()
" [--try-flip]\n"
" [filename|camera_index]\n\n"
"see facedetect.cmd for one call:\n"
"./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n"
"./facedetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n"
"During execution:\n\tHit any key to quit.\n"
"\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}
@ -41,8 +41,8 @@ int main( int argc, const char** argv )
cv::CommandLineParser parser(argc, argv,
"{help h||}"
"{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{nested-cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}"
"{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{nested-cascade|data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}"
"{scale|1|}{try-flip||}{@filename||}"
);
if (parser.has("help"))
@ -62,9 +62,9 @@ int main( int argc, const char** argv )
parser.printErrors();
return 0;
}
if ( !nestedCascade.load( nestedCascadeName ) )
if (!nestedCascade.load(samples::findFileOrKeep(nestedCascadeName)))
cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
if( !cascade.load( cascadeName ) )
if (!cascade.load(samples::findFile(cascadeName)))
{
cerr << "ERROR: Could not load classifier cascade" << endl;
help();
@ -74,21 +74,31 @@ int main( int argc, const char** argv )
{
int camera = inputName.empty() ? 0 : inputName[0] - '0';
if(!capture.open(camera))
cout << "Capture from camera #" << camera << " didn't work" << endl;
}
else if( inputName.size() )
{
image = imread( inputName, 1 );
if( image.empty() )
{
if(!capture.open( inputName ))
cout << "Capture from camera #" << camera << " didn't work" << endl;
return 1;
}
}
else if (!inputName.empty())
{
image = imread(samples::findFileOrKeep(inputName), IMREAD_COLOR);
if (image.empty())
{
if (!capture.open(samples::findFileOrKeep(inputName)))
{
cout << "Could not read " << inputName << endl;
return 1;
}
}
}
else
{
image = imread( "../data/lena.jpg", 1 );
if(image.empty()) cout << "Couldn't read ../data/lena.jpg" << endl;
image = imread(samples::findFile("lena.jpg"), IMREAD_COLOR);
if (image.empty())
{
cout << "Couldn't read lena.jpg" << endl;
return 1;
}
}
if( capture.isOpened() )

View File

@ -32,14 +32,14 @@ string face_cascade_path, eye_cascade_path, nose_cascade_path, mouth_cascade_pat
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
"{eyes||}{nose||}{mouth||}{help h||}");
"{eyes||}{nose||}{mouth||}{help h||}{@image||}{@facexml||}");
if (parser.has("help"))
{
help();
return 0;
}
input_image_path = parser.get<string>(0);
face_cascade_path = parser.get<string>(1);
input_image_path = parser.get<string>("@image");
face_cascade_path = parser.get<string>("@facexml");
eye_cascade_path = parser.has("eyes") ? parser.get<string>("eyes") : "";
nose_cascade_path = parser.has("nose") ? parser.get<string>("nose") : "";
mouth_cascade_path = parser.has("mouth") ? parser.get<string>("mouth") : "";
@ -50,7 +50,7 @@ int main(int argc, char** argv)
}
// Load image and cascade classifier files
Mat image;
image = imread(input_image_path);
image = imread(samples::findFile(input_image_path));
// Detect faces and facial features
vector<Rect_<int> > faces;
@ -92,15 +92,16 @@ static void help()
" \nhttps://github.com/opencv/opencv/tree/3.4/data/haarcascades";
cout << "\n\nThe classifiers for nose and mouth can be downloaded from : "
" \nhttps://github.com/opencv/opencv_contrib/tree/master/modules/face/data/cascades\n";
" \nhttps://github.com/opencv/opencv_contrib/tree/3.4/modules/face/data/cascades\n";
}
static void detectFaces(Mat& img, vector<Rect_<int> >& faces, string cascade_path)
{
CascadeClassifier face_cascade;
face_cascade.load(cascade_path);
face_cascade.load(samples::findFile(cascade_path));
face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
if (!face_cascade.empty())
face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
@ -186,26 +187,29 @@ static void detectFacialFeaures(Mat& img, const vector<Rect_<int> > faces, strin
static void detectEyes(Mat& img, vector<Rect_<int> >& eyes, string cascade_path)
{
CascadeClassifier eyes_cascade;
eyes_cascade.load(cascade_path);
eyes_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
if (!eyes_cascade.empty())
eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
static void detectNose(Mat& img, vector<Rect_<int> >& nose, string cascade_path)
{
CascadeClassifier nose_cascade;
nose_cascade.load(cascade_path);
nose_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
if (!nose_cascade.empty())
nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
static void detectMouth(Mat& img, vector<Rect_<int> >& mouth, string cascade_path)
{
CascadeClassifier mouth_cascade;
mouth_cascade.load(cascade_path);
mouth_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
if (!mouth_cascade.empty())
mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}

View File

@ -87,7 +87,7 @@ int main(int argc, char** argv)
Mat img;
if (argc > 1)
img = imread(argv[1], IMREAD_GRAYSCALE);
img = imread(samples::findFile(argv[1]), IMREAD_GRAYSCALE);
else
img = DrawMyImage(2,256);

View File

@ -12,7 +12,7 @@ static void help()
{
cout << "\nThis program demonstrated the floodFill() function\n"
"Call:\n"
"./ffilldemo [image_name -- Default: ../data/fruits.jpg]\n" << endl;
"./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
"\tESC - quit the program\n"
@ -74,7 +74,7 @@ static void onMouse( int event, int x, int y, int, void* )
int main( int argc, char** argv )
{
cv::CommandLineParser parser (argc, argv,
"{help h | | show help message}{@image|../data/fruits.jpg| input image}"
"{help h | | show help message}{@image|fruits.jpg| input image}"
);
if (parser.has("help"))
{
@ -82,7 +82,7 @@ int main( int argc, char** argv )
return 0;
}
string filename = parser.get<string>("@image");
image0 = imread(filename, 1);
image0 = imread(samples::findFile(filename), 1);
if( image0.empty() )
{

View File

@ -92,8 +92,8 @@ int main(int ac, char** av)
cout << "writing images\n";
fs << "images" << "[";
fs << "image1.jpg" << "myfi.png" << "../data/baboon.jpg";
cout << "image1.jpg" << " myfi.png" << " ../data/baboon.jpg" << endl;
fs << "image1.jpg" << "myfi.png" << "baboon.jpg";
cout << "image1.jpg" << " myfi.png" << " baboon.jpg" << endl;
fs << "]";

View File

@ -171,7 +171,7 @@ static void help()
"contours and approximate it by ellipses. Three methods are used to find the \n"
"elliptical fits: fitEllipse, fitEllipseAMS and fitEllipseDirect.\n"
"Call:\n"
"./fitellipse [image_name -- Default ../data/stuff.jpg]\n" << endl;
"./fitellipse [image_name -- Default ellipses.jpg]\n" << endl;
}
int sliderPos = 70;
@ -192,14 +192,14 @@ int main( int argc, char** argv )
fitEllipseAMSQ = true;
fitEllipseDirectQ = true;
cv::CommandLineParser parser(argc, argv,"{help h||}{@image|../data/ellipses.jpg|}");
cv::CommandLineParser parser(argc, argv,"{help h||}{@image|ellipses.jpg|}");
if (parser.has("help"))
{
help();
return 0;
}
string filename = parser.get<string>("@image");
image = imread(filename, 0);
image = imread(samples::findFile(filename), 0);
if( image.empty() )
{
cout << "Couldn't open image " << filename << "\n";

View File

@ -276,7 +276,7 @@ static void on_mouse( int event, int x, int y, int flags, void* param )
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{@input| ../data/messi5.jpg |}");
cv::CommandLineParser parser(argc, argv, "{@input| messi5.jpg |}");
help();
string filename = parser.get<string>("@input");
@ -285,7 +285,7 @@ int main( int argc, char** argv )
cout << "\nDurn, empty filename" << endl;
return 1;
}
Mat image = imread( filename, 1 );
Mat image = imread(samples::findFile(filename), IMREAD_COLOR);
if( image.empty() )
{
cout << "\n Durn, couldn't read image filename " << filename << endl;

View File

@ -266,6 +266,7 @@ int main(int argc, char *argv[])
cout << "Unsupported mode: " << mode << endl;
return -1;
}
file_name = samples::findFile(file_name);
cout << "Mode: " << mode << ", Backend: " << backend << ", File: " << file_name << ", Codec: " << codec << endl;
TickMeter total;

View File

@ -14,7 +14,7 @@ static void help()
"It shows reading of images, converting to planes and merging back, color conversion\n"
"and also iterating through pixels.\n"
"Call:\n"
"./image [image-name Default: ../data/lena.jpg]\n" << endl;
"./image [image-name Default: lena.jpg]\n" << endl;
}
// enable/disable use of mixed API in the code below.
@ -27,7 +27,7 @@ static void help()
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{help h | |}{@image|../data/lena.jpg|}");
cv::CommandLineParser parser(argc, argv, "{help h | |}{@image|lena.jpg|}");
if (parser.has("help"))
{
help();
@ -47,7 +47,7 @@ int main( int argc, char** argv )
// is converted, while the data is shared)
//! [iplimage]
#else
Mat img = imread(imagename); // the newer cvLoadImage alternative, MATLAB-style function
Mat img = imread(samples::findFile(imagename)); // the newer cvLoadImage alternative, MATLAB-style function
if(img.empty())
{
fprintf(stderr, "Can not load image %s\n", imagename.c_str());

View File

@ -3,7 +3,7 @@
* findTransformECC that implements the image alignment ECC algorithm
*
*
* The demo loads an image (defaults to ../data/fruits.jpg) and it artificially creates
* The demo loads an image (defaults to fruits.jpg) and it artificially creates
* a template image based on the given motion type. When two images are given,
* the first image is the input image and the second one defines the template image.
* In the latter case, you can also parse the warp's initialization.
@ -44,7 +44,7 @@ static void draw_warped_roi(Mat& image, const int width, const int height, Mat&
const std::string keys =
"{@inputImage | ../data/fruits.jpg | input image filename }"
"{@inputImage | fruits.jpg | input image filename }"
"{@templateImage | | template image filename (optional)}"
"{@inputWarp | | input warp (matrix) filename (optional)}"
"{n numOfIter | 50 | ECC's iterations }"
@ -65,10 +65,10 @@ static void help(void)
" are given, the initialization of the warp by command line parsing is possible. "
"If inputWarp is missing, the identity transformation initializes the algorithm. \n" << endl;
cout << "\nUsage example (one image): \n./ecc ../data/fruits.jpg -o=outWarp.ecc "
cout << "\nUsage example (one image): \n./image_alignment fruits.jpg -o=outWarp.ecc "
"-m=euclidean -e=1e-6 -N=70 -v=1 \n" << endl;
cout << "\nUsage example (two images with initialization): \n./ecc yourInput.png yourTemplate.png "
cout << "\nUsage example (two images with initialization): \n./image_alignment yourInput.png yourTemplate.png "
"yourInitialWarp.ecc -o=outWarp.ecc -m=homography -e=1e-6 -N=70 -v=1 -w=yourFinalImage.png \n" << endl;
}
@ -212,7 +212,7 @@ int main (const int argc, const char * argv[])
else
mode_temp = MOTION_HOMOGRAPHY;
Mat inputImage = imread(imgFile,0);
Mat inputImage = imread(samples::findFile(imgFile), IMREAD_GRAYSCALE);
if (inputImage.empty())
{
cerr << "Unable to load the inputImage" << endl;
@ -224,7 +224,7 @@ int main (const int argc, const char * argv[])
if (tempImgFile!="") {
inputImage.copyTo(target_image);
template_image = imread(tempImgFile,0);
template_image = imread(samples::findFile(tempImgFile), IMREAD_GRAYSCALE);
if (template_image.empty()){
cerr << "Unable to load the template image" << endl;
return -1;

View File

@ -14,7 +14,7 @@ static void help()
<< "with surrounding image areas.\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
"Usage:\n"
"./inpaint [image_name -- Default ../data/fruits.jpg]\n" << endl;
"./inpaint [image_name -- Default fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
"\tESC - quit the program\n"
@ -47,24 +47,24 @@ static void onMouse( int event, int x, int y, int flags, void* )
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{@image|../data/fruits.jpg|}");
cv::CommandLineParser parser(argc, argv, "{@image|fruits.jpg|}");
help();
string filename = parser.get<string>("@image");
Mat img0 = imread(filename, -1);
string filename = samples::findFile(parser.get<string>("@image"));
Mat img0 = imread(filename, IMREAD_COLOR);
if(img0.empty())
{
cout << "Couldn't open the image " << filename << ". Usage: inpaint <image_name>\n" << endl;
return 0;
}
namedWindow( "image", 1 );
namedWindow("image", WINDOW_AUTOSIZE);
img = img0.clone();
inpaintMask = Mat::zeros(img.size(), CV_8U);
imshow("image", img);
setMouseCallback( "image", onMouse, 0 );
setMouseCallback( "image", onMouse, NULL);
for(;;)
{

View File

@ -25,39 +25,46 @@ int smoothType = GAUSSIAN;
int main( int argc, char** argv )
{
VideoCapture cap;
cv::CommandLineParser parser(argc, argv, "{ c | 0 | }{ p | | }");
help();
if( parser.get<string>("c").size() == 1 && isdigit(parser.get<string>("c")[0]) )
VideoCapture cap;
string camera = parser.get<string>("c");
if (camera.size() == 1 && isdigit(camera[0]))
cap.open(parser.get<int>("c"));
else
cap.open(parser.get<string>("c"));
if( cap.isOpened() )
cout << "Video " << parser.get<string>("c") <<
": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
if( parser.has("p") )
cap.open(samples::findFileOrKeep(camera));
if (!cap.isOpened())
{
int pos = parser.get<int>("p");
if (!parser.check())
{
parser.printErrors();
return -1;
}
cout << "seeking to frame #" << pos << endl;
cap.set(CAP_PROP_POS_FRAMES, pos);
cerr << "Can't open camera/video stream: " << camera << endl;
return 1;
}
if( !cap.isOpened() )
cout << "Video " << parser.get<string>("c") <<
": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
int pos = 0;
if (parser.has("p"))
{
cout << "Could not initialize capturing...\n";
pos = parser.get<int>("p");
}
if (!parser.check())
{
parser.printErrors();
return -1;
}
namedWindow( "Laplacian", 0 );
createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );
if (pos != 0)
{
cout << "seeking to frame #" << pos << endl;
if (!cap.set(CAP_PROP_POS_FRAMES, pos))
{
cerr << "ERROR: seekeing is not supported" << endl;
}
}
namedWindow("Laplacian", WINDOW_AUTOSIZE);
createTrackbar("Sigma", "Laplacian", &sigma, 15, 0);
Mat smoothed, laplace, result;

View File

@ -520,13 +520,13 @@ int main( int argc, char *argv[] )
string data_filename;
int method = 0;
cv::CommandLineParser parser(argc, argv, "{data|../data/letter-recognition.data|}{save||}{load||}{boost||}"
cv::CommandLineParser parser(argc, argv, "{data|letter-recognition.data|}{save||}{load||}{boost||}"
"{mlp||}{knn knearest||}{nbayes||}{svm||}");
data_filename = parser.get<string>("data");
data_filename = samples::findFile(parser.get<string>("data"));
if (parser.has("save"))
filename_to_save = parser.get<string>("save");
if (parser.has("load"))
filename_to_load = parser.get<string>("load");
filename_to_load = samples::findFile(parser.get<string>("load"));
if (parser.has("boost"))
method = 1;
else if (parser.has("mlp"))

View File

@ -7,10 +7,10 @@
using namespace std;
using namespace cv;
void getMatWithQRCodeContour(Mat &color_image, vector<Point> transform);
void getMatWithFPS(Mat &color_image, double fps);
int liveQRCodeDetect();
int showImageQRCodeDetect(string in, string out);
static void drawQRCodeContour(Mat &color_image, vector<Point> transform);
static void drawFPS(Mat &color_image, double fps);
static int liveQRCodeDetect(const string& out_file);
static int imageQRCodeDetect(const string& in_file, const string& out_file);
int main(int argc, char *argv[])
{
@ -28,7 +28,9 @@ int main(int argc, char *argv[])
}
string in_file_name = cmd_parser.get<string>("in"); // input path to image
string out_file_name = cmd_parser.get<string>("out"); // output path to image
string out_file_name;
if (cmd_parser.has("out"))
out_file_name = cmd_parser.get<string>("out"); // output path to image
if (!cmd_parser.check())
{
@ -39,16 +41,16 @@ int main(int argc, char *argv[])
int return_code = 0;
if (in_file_name.empty())
{
return_code = liveQRCodeDetect();
return_code = liveQRCodeDetect(out_file_name);
}
else
{
return_code = showImageQRCodeDetect(in_file_name, out_file_name);
return_code = imageQRCodeDetect(samples::findFile(in_file_name), out_file_name);
}
return return_code;
}
void getMatWithQRCodeContour(Mat &color_image, vector<Point> transform)
void drawQRCodeContour(Mat &color_image, vector<Point> transform)
{
if (!transform.empty())
{
@ -70,19 +72,19 @@ void getMatWithQRCodeContour(Mat &color_image, vector<Point> transform)
}
}
void getMatWithFPS(Mat &color_image, double fps)
void drawFPS(Mat &color_image, double fps)
{
ostringstream convert;
convert << cvRound(fps) << " FPS.";
convert << cvRound(fps) << " FPS (QR detection)";
putText(color_image, convert.str(), Point(25, 25), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 0, 255), 2);
}
int liveQRCodeDetect()
int liveQRCodeDetect(const string& out_file)
{
VideoCapture cap(0);
if(!cap.isOpened())
{
cout << "Cannot open a camera" << '\n';
cout << "Cannot open a camera" << endl;
return -4;
}
@ -94,7 +96,11 @@ int liveQRCodeDetect()
string decode_info;
vector<Point> transform;
cap >> frame;
if(frame.empty()) { break; }
if (frame.empty())
{
cout << "End of video stream" << endl;
break;
}
cvtColor(frame, src, COLOR_BGR2GRAY);
total.start();
@ -102,24 +108,30 @@ int liveQRCodeDetect()
if (result_detection)
{
decode_info = qrcode.decode(src, transform, straight_barcode);
if (!decode_info.empty()) { cout << decode_info << '\n'; }
if (!decode_info.empty()) { cout << decode_info << endl; }
}
total.stop();
double fps = 1 / total.getTimeSec();
total.reset();
if (result_detection) { getMatWithQRCodeContour(frame, transform); }
getMatWithFPS(frame, fps);
if (result_detection) { drawQRCodeContour(frame, transform); }
drawFPS(frame, fps);
imshow("Live QR code detector", frame);
if( waitKey(30) > 0 ) { break; }
char c = (char)waitKey(30);
if (c == 27)
break;
if (c == ' ' && !out_file.empty())
imwrite(out_file, frame); // TODO write original frame too
}
return 0;
}
int showImageQRCodeDetect(string in, string out)
int imageQRCodeDetect(const string& in_file, const string& out_file)
{
Mat src = imread(in, IMREAD_GRAYSCALE), straight_barcode;
Mat color_src = imread(in_file, IMREAD_COLOR), src;
cvtColor(color_src, src, COLOR_BGR2GRAY);
Mat straight_barcode;
string decoded_info;
vector<Point> transform;
const int count_experiments = 10;
@ -135,54 +147,40 @@ int showImageQRCodeDetect(string in, string out)
total.stop();
transform_time += total.getTimeSec();
total.reset();
if (!result_detection) { break; }
if (!result_detection)
continue;
total.start();
decoded_info = qrcode.decode(src, transform, straight_barcode);
total.stop();
transform_time += total.getTimeSec();
total.reset();
if (decoded_info.empty()) { break; }
}
double fps = count_experiments / transform_time;
if (!result_detection) { cout << "QR code not found\n"; return -2; }
if (decoded_info.empty()) { cout << "QR code cannot be decoded\n"; return -3; }
if (!result_detection)
cout << "QR code not found" << endl;
if (decoded_info.empty())
cout << "QR code cannot be decoded" << endl;
Mat color_src = imread(in);
getMatWithQRCodeContour(color_src, transform);
getMatWithFPS(color_src, fps);
drawQRCodeContour(color_src, transform);
drawFPS(color_src, fps);
cout << "Input image file path: " << in_file << endl;
cout << "Output image file path: " << out_file << endl;
cout << "Size: " << color_src.size() << endl;
cout << "FPS: " << fps << endl;
cout << "Decoded info: " << decoded_info << endl;
if (!out_file.empty())
{
imwrite(out_file, color_src);
}
for(;;)
{
imshow("Detect QR code on image", color_src);
if( waitKey(30) > 0 ) { break; }
}
if (!out.empty())
{
getMatWithQRCodeContour(color_src, transform);
getMatWithFPS(color_src, fps);
cout << "Input image file path: " << in << '\n';
cout << "Output image file path: " << out << '\n';
cout << "Size: " << color_src.size() << '\n';
cout << "FPS: " << fps << '\n';
cout << "Decoded info: " << decoded_info << '\n';
vector<int> compression_params;
compression_params.push_back(IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9);
try
{
imwrite(out, color_src, compression_params);
}
catch (const cv::Exception& ex)
{
cout << "Exception converting image to PNG format: ";
cout << ex.what() << '\n';
return -3;
}
if (waitKey(0) == 27)
break;
}
return 0;
}

View File

@ -83,7 +83,7 @@ static float calculateAccuracyPercent(const Mat &original, const Mat &predicted)
int main()
{
const String filename = "../data/data01.xml";
const String filename = samples::findFile("data01.xml");
cout << "**********************************************************************" << endl;
cout << filename
<< " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl;

View File

@ -9,7 +9,7 @@ using namespace cv;
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
"{input i|../data/building.jpg|input image}"
"{input i|building.jpg|input image}"
"{refine r|false|if true use LSD_REFINE_STD method, if false use LSD_REFINE_NONE method}"
"{canny c|false|use Canny edge detector}"
"{overlay o|false|show result on input image}"
@ -23,7 +23,7 @@ int main(int argc, char** argv)
parser.printMessage();
String filename = parser.get<String>("input");
String filename = samples::findFile(parser.get<String>("input"));
bool useRefine = parser.get<bool>("refine");
bool useCanny = parser.get<bool>("canny");
bool overlay = parser.get<bool>("overlay");

View File

@ -8,17 +8,27 @@ using namespace cv;
int main( int argc, const char** argv )
{
CommandLineParser parser(argc, argv,
"{ i | ../data/lena_tmpl.jpg |image name }"
"{ t | ../data/tmpl.png |template name }"
"{ m | ../data/mask.png |mask name }"
"{ i | lena_tmpl.jpg |image name }"
"{ t | tmpl.png |template name }"
"{ m | mask.png |mask name }"
"{ cm| 3 |comparison method }");
cout << "This program demonstrates the use of template matching with mask.\n\n";
cout << "This program demonstrates the use of template matching with mask." << endl
<< endl
<< "Available methods: https://docs.opencv.org/3.4/df/dfb/group__imgproc__object.html#ga3a7850640f1fe1f58fe91a2d7583695d" << endl
<< " TM_SQDIFF = " << (int)TM_SQDIFF << endl
<< " TM_SQDIFF_NORMED = " << (int)TM_SQDIFF_NORMED << endl
<< " TM_CCORR = " << (int)TM_CCORR << endl
<< " TM_CCORR_NORMED = " << (int)TM_CCORR_NORMED << endl
<< " TM_CCOEFF = " << (int)TM_CCOEFF << endl
<< " TM_CCOEFF_NORMED = " << (int)TM_CCOEFF_NORMED << endl
<< endl;
parser.printMessage();
string filename = parser.get<string>("i");
string tmplname = parser.get<string>("t");
string maskname = parser.get<string>("m");
string filename = samples::findFile(parser.get<string>("i"));
string tmplname = samples::findFile(parser.get<string>("t"));
string maskname = samples::findFile(parser.get<string>("m"));
Mat img = imread(filename);
Mat tmpl = imread(tmplname);
Mat mask = imread(maskname);
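The masked matching that this sample demonstrates reduces to matchTemplate() with its optional mask argument followed by minMaxLoc(). A minimal sketch, assuming the image, template and mask are already loaded and that the chosen method supports a mask (older releases only accept a mask for TM_SQDIFF and TM_CCORR_NORMED):

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    // Locate `tmpl` inside `img`, ignoring template pixels where `mask` is zero.
    static cv::Point matchWithMask(const cv::Mat& img, const cv::Mat& tmpl,
                                   const cv::Mat& mask, int method = cv::TM_CCORR_NORMED)
    {
        cv::Mat result;
        cv::matchTemplate(img, tmpl, result, method, mask);  // mask has the same size as tmpl
        double minVal, maxVal;
        cv::Point minLoc, maxLoc;
        cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
        // For TM_SQDIFF / TM_SQDIFF_NORMED the best match is the minimum, otherwise the maximum.
        return (method == cv::TM_SQDIFF || method == cv::TM_SQDIFF_NORMED) ? minLoc : maxLoc;
    }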

View File

@ -12,7 +12,7 @@ static void help()
{
cout << "\n This program demonstrates how to detect compute and match ORB BRISK and AKAZE descriptors \n"
"Usage: \n"
" ./matchmethod_orb_akaze_brisk --image1=<image1(../data/basketball1.png as default)> --image2=<image2(../data/basketball2.png as default)>\n"
" ./matchmethod_orb_akaze_brisk --image1=<image1(basketball1.png as default)> --image2=<image2(basketball2.png as default)>\n"
"Press a key when image window is active to change algorithm or descriptor";
}
@ -28,34 +28,34 @@ int main(int argc, char *argv[])
typeDesc.push_back("AKAZE"); // see http://docs.opencv.org/trunk/d8/d30/classcv_1_1AKAZE.html
typeDesc.push_back("ORB"); // see http://docs.opencv.org/trunk/de/dbf/classcv_1_1BRISK.html
typeDesc.push_back("BRISK"); // see http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html
// This algorithm would be used to match descriptors see http://docs.opencv.org/trunk/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
// This algorithm would be used to match descriptors see http://docs.opencv.org/trunk/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
typeAlgoMatch.push_back("BruteForce");
typeAlgoMatch.push_back("BruteForce-L1");
typeAlgoMatch.push_back("BruteForce-Hamming");
typeAlgoMatch.push_back("BruteForce-Hamming(2)");
cv::CommandLineParser parser(argc, argv,
"{ @image1 | ../data/basketball1.png | }"
"{ @image2 | ../data/basketball2.png | }"
"{ @image1 | basketball1.png | }"
"{ @image2 | basketball2.png | }"
"{help h ||}");
if (parser.has("help"))
{
help();
return 0;
}
fileName.push_back(parser.get<string>(0));
fileName.push_back(parser.get<string>(1));
fileName.push_back(samples::findFile(parser.get<string>(0)));
fileName.push_back(samples::findFile(parser.get<string>(1)));
Mat img1 = imread(fileName[0], IMREAD_GRAYSCALE);
Mat img2 = imread(fileName[1], IMREAD_GRAYSCALE);
if (img1.rows*img1.cols <= 0)
{
cout << "Image " << fileName[0] << " is empty or cannot be found\n";
return(0);
}
if (img2.rows*img2.cols <= 0)
{
cout << "Image " << fileName[1] << " is empty or cannot be found\n";
return(0);
}
if (img1.empty())
{
cerr << "Image " << fileName[0] << " is empty or cannot be found" << endl;
return 1;
}
if (img2.empty())
{
cerr << "Image " << fileName[1] << " is empty or cannot be found" << endl;
return 1;
}
vector<double> desMethCmp;
Ptr<Feature2D> b;
@ -74,10 +74,10 @@ int main(int argc, char *argv[])
vector<String>::iterator itMatcher = typeAlgoMatch.end();
if (*itDesc == "AKAZE-DESCRIPTOR_KAZE_UPRIGHT"){
b = AKAZE::create(AKAZE::DESCRIPTOR_KAZE_UPRIGHT);
}
}
if (*itDesc == "AKAZE"){
b = AKAZE::create();
}
}
if (*itDesc == "ORB"){
b = ORB::create();
}
@ -157,12 +157,12 @@ int main(int argc, char *argv[])
}
catch (const Exception& e)
{
cerr << "Exception: " << e.what() << endl;
cout << "Feature : " << *itDesc << "\n";
if (itMatcher != typeAlgoMatch.end())
{
cout << "Matcher : " << *itMatcher << "\n";
}
cout << e.msg << endl;
}
}
int i=0;

View File

@ -18,7 +18,7 @@ int main( int /*argc*/, char** /*argv*/ )
{
help();
Mat img(500, 500, CV_8UC3);
Mat img(500, 500, CV_8UC3, Scalar::all(0));
RNG& rng = theRNG();
for(;;)

View File

@ -33,8 +33,8 @@ int erode_dilate_pos = 0;
// callback function for open/close trackbar
static void OpenClose(int, void*)
{
int n = open_close_pos - max_iters;
int an = n > 0 ? n : -n;
int n = open_close_pos;
int an = abs(n);
Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );
if( n < 0 )
morphologyEx(src, dst, MORPH_OPEN, element);
@ -46,8 +46,8 @@ static void OpenClose(int, void*)
// callback function for erode/dilate trackbar
static void ErodeDilate(int, void*)
{
int n = erode_dilate_pos - max_iters;
int an = n > 0 ? n : -n;
int n = erode_dilate_pos;
int an = abs(n);
Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );
if( n < 0 )
erode(src, dst, element);
@ -59,13 +59,13 @@ static void ErodeDilate(int, void*)
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | ../data/baboon.jpg | }");
cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | baboon.jpg | }");
if (parser.has("help"))
{
help();
return 0;
}
std::string filename = parser.get<std::string>("@image");
std::string filename = samples::findFile(parser.get<std::string>("@image"));
if( (src = imread(filename,IMREAD_COLOR)).empty() )
{
help();
@ -78,7 +78,14 @@ int main( int argc, char** argv )
open_close_pos = erode_dilate_pos = max_iters;
createTrackbar("iterations", "Open/Close",&open_close_pos,max_iters*2+1,OpenClose);
setTrackbarMin("iterations", "Open/Close", -max_iters);
setTrackbarMax("iterations", "Open/Close", max_iters);
setTrackbarPos("iterations", "Open/Close", 0);
createTrackbar("iterations", "Erode/Dilate",&erode_dilate_pos,max_iters*2+1,ErodeDilate);
setTrackbarMin("iterations", "Erode/Dilate", -max_iters);
setTrackbarMax("iterations", "Erode/Dilate", max_iters);
setTrackbarPos("iterations", "Erode/Dilate", 0);
for(;;)
{

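The trackbar change above relies on setTrackbarMin()/setTrackbarMax() to give the slider a signed range, so a single control selects both the direction (open/close or erode/dilate) and the kernel size. A small standalone sketch of that setup (window and callback names are illustrative):

    #include <opencv2/highgui.hpp>

    static int open_close_pos = 0;
    static const int max_iters = 10;

    static void OnOpenClose(int, void*)
    {
        // Negative positions select MORPH_OPEN, positive ones MORPH_CLOSE;
        // the magnitude gives the structuring-element size (see the sample above).
    }

    int main()
    {
        cv::namedWindow("Open/Close", cv::WINDOW_AUTOSIZE);
        // Create the trackbar with the full span first, then widen it to [-max_iters, max_iters].
        cv::createTrackbar("iterations", "Open/Close", &open_close_pos, max_iters * 2 + 1, OnOpenClose);
        cv::setTrackbarMin("iterations", "Open/Close", -max_iters);
        cv::setTrackbarMax("iterations", "Open/Close", max_iters);
        cv::setTrackbarPos("iterations", "Open/Close", 0);
        cv::waitKey(0);
        return 0;
    }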
View File

@ -28,26 +28,22 @@ using namespace cv;
int main(int argc, char* argv[])
{
cv::CommandLineParser parser(argc, argv, "{help h||show help message}{@image|../data/lena.jpg|input image}");
cv::CommandLineParser parser(argc, argv, "{help h||show help message}{@image|lena.jpg|input image}");
if (parser.has("help"))
{
parser.printMessage();
exit(0);
}
if (parser.get<string>("@image").empty())
{
parser.printMessage();
exit(0);
return 0;
}
string filename = samples::findFile(parser.get<string>("@image"));
Mat I = imread(parser.get<string>("@image"));
Mat I = imread(filename);
int num,type;
if(I.empty())
{
cout << "Image not found" << endl;
exit(0);
return 1;
}
cout << endl;

View File

@ -72,7 +72,10 @@ int main(int argc, char** argv)
if (file.empty())
cap.open(camera);
else
cap.open(file.c_str());
{
file = samples::findFileOrKeep(file);
cap.open(file);
}
if (!cap.isOpened())
{
cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;

View File

@ -2,11 +2,6 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/highgui.hpp"
#ifdef HAVE_OPENCV_OCL
#define _OCL_KNN_ 1 // select whether using ocl::KNN method or not, default is using
#define _OCL_SVM_ 1 // select whether using ocl::svm method or not, default is using
#include "opencv2/ocl/ocl.hpp"
#endif
#include <stdio.h>

View File

@ -24,7 +24,7 @@ int main( int argc, char** argv )
if( arg.size() == 1 && isdigit(arg[0]) )
capture.open( arg[0] - '0' );
else
capture.open( arg.c_str() );
capture.open(samples::findFileOrKeep(arg));
if( !capture.isOpened() )
{

View File

@ -73,7 +73,7 @@ int main(int argc, char** argv)
if (input.empty())
cap.open(0);
else
cap.open(input);
cap.open(samples::findFileOrKeep(input));
if( !cap.isOpened() )
{

View File

@ -416,7 +416,7 @@ int main(int argc, char** argv)
if ( parser.get<string>("@input").size() == 1 && isdigit(parser.get<string>("@input")[0]) )
cameraId = parser.get<int>("@input");
else
inputName = parser.get<string>("@input");
inputName = samples::findFileOrKeep(parser.get<string>("@input"));
if (!parser.check())
{
puts(help);

View File

@ -16,7 +16,7 @@ static void help()
" [--try-flip]\n"
" [video_filename|camera_index]\n\n"
"Example:\n"
"./smiledetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --smile-cascade=\"../../data/haarcascades/haarcascade_smile.xml\" --scale=2.0\n\n"
"./smiledetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --smile-cascade=\"data/haarcascades/haarcascade_smile.xml\" --scale=2.0\n\n"
"During execution:\n\tHit any key to quit.\n"
"\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}
@ -41,16 +41,16 @@ int main( int argc, const char** argv )
double scale;
cv::CommandLineParser parser(argc, argv,
"{help h||}{scale|1|}"
"{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{smile-cascade|../../data/haarcascades/haarcascade_smile.xml|}"
"{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{smile-cascade|data/haarcascades/haarcascade_smile.xml|}"
"{try-flip||}{@input||}");
if (parser.has("help"))
{
help();
return 0;
}
cascadeName = parser.get<string>("cascade");
nestedCascadeName = parser.get<string>("smile-cascade");
cascadeName = samples::findFile(parser.get<string>("cascade"));
nestedCascadeName = samples::findFile(parser.get<string>("smile-cascade"));
tryflip = parser.has("try-flip");
inputName = parser.get<string>("@input");
scale = parser.get<int>("scale");
@ -81,6 +81,7 @@ int main( int argc, const char** argv )
}
else if( inputName.size() )
{
inputName = samples::findFileOrKeep(inputName);
if(!capture.open( inputName ))
cout << "Could not read " << inputName << endl;
}

View File

@ -138,8 +138,8 @@ static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
int main(int argc, char** argv)
{
static const char* names[] = { "../data/pic1.png", "../data/pic2.png", "../data/pic3.png",
"../data/pic4.png", "../data/pic5.png", "../data/pic6.png", 0 };
static const char* names[] = { "data/pic1.png", "data/pic2.png", "data/pic3.png",
"data/pic4.png", "data/pic5.png", "data/pic6.png", 0 };
help(argv[0]);
if( argc > 1)
@ -152,10 +152,11 @@ int main(int argc, char** argv)
for( int i = 0; names[i] != 0; i++ )
{
Mat image = imread(names[i], IMREAD_COLOR);
string filename = samples::findFile(names[i]);
Mat image = imread(filename, IMREAD_COLOR);
if( image.empty() )
{
cout << "Couldn't load " << names[i] << endl;
cout << "Couldn't load " << filename << endl;
continue;
}

View File

@ -18,7 +18,6 @@
Homepage: http://opencv.org
Online docs: http://docs.opencv.org
Q&A forum: http://answers.opencv.org
Issue tracker: http://code.opencv.org
GitHub: https://github.com/opencv/opencv/
************************************************** */
@ -46,11 +45,11 @@ static int print_help()
" on the chessboards, and a flag: useCalibrated for \n"
" calibrated (0) or\n"
" uncalibrated \n"
" (1: use cvStereoCalibrate(), 2: compute fundamental\n"
" (1: use stereoCalibrate(), 2: compute fundamental\n"
" matrix separately) stereo. \n"
" Calibrate the cameras and display the\n"
" rectified results along with the computed disparity images. \n" << endl;
cout << "Usage:\n ./stereo_calib -w=<board_width default=9> -h=<board_height default=6> -s=<square_size default=1.0> <image list XML/YML file default=../data/stereo_calib.xml>\n" << endl;
cout << "Usage:\n ./stereo_calib -w=<board_width default=9> -h=<board_height default=6> -s=<square_size default=1.0> <image list XML/YML file default=stereo_calib.xml>\n" << endl;
return 0;
}
@ -347,11 +346,11 @@ int main(int argc, char** argv)
Size boardSize;
string imagelistfn;
bool showRectified;
cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|../data/stereo_calib.xml|}");
cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|stereo_calib.xml|}");
if (parser.has("help"))
return print_help();
showRectified = !parser.has("nr");
imagelistfn = parser.get<string>("@input");
imagelistfn = samples::findFile(parser.get<string>("@input"));
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
float squareSize = parser.get<float>("s");

View File

@ -65,8 +65,8 @@ int main(int argc, char** argv)
print_help();
return 0;
}
img1_filename = parser.get<std::string>(0);
img2_filename = parser.get<std::string>(1);
img1_filename = samples::findFile(parser.get<std::string>(0));
img2_filename = samples::findFile(parser.get<std::string>(1));
if (parser.has("algorithm"))
{
std::string _alg = parser.get<std::string>("algorithm");

View File

@ -112,7 +112,7 @@ int parseCmdArgs(int argc, char** argv)
}
else
{
Mat img = imread(argv[i]);
Mat img = imread(samples::findFile(argv[i]));
if (img.empty())
{
cout << "Can't read image '" << argv[i] << "'\n";

View File

@ -405,7 +405,7 @@ int main(int argc, char* argv[])
for (int i = 0; i < num_images; ++i)
{
full_img = imread(img_names[i]);
full_img = imread(samples::findFile(img_names[i]));
full_img_sizes[i] = full_img.size();
if (full_img.empty())
@ -727,7 +727,7 @@ int main(int argc, char* argv[])
LOGLN("Compositing image #" << indices[img_idx]+1);
// Read image and resize it if necessary
full_img = imread(img_names[img_idx]);
full_img = imread(samples::findFile(img_names[img_idx]));
if (!is_compose_scale_set)
{
if (compose_megapix > 0)

View File

@ -2,7 +2,7 @@
#include <iostream>
#include <time.h>
// OpenCV
#include <opencv2//core.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

View File

@ -164,8 +164,8 @@ int main(int argc, const char* argv[])
return -1;
}
Mat frame0 = imread(frame0_name, IMREAD_GRAYSCALE);
Mat frame1 = imread(frame1_name, IMREAD_GRAYSCALE);
Mat frame0 = imread(samples::findFile(frame0_name), IMREAD_GRAYSCALE);
Mat frame1 = imread(samples::findFile(frame1_name), IMREAD_GRAYSCALE);
if (frame0.empty())
{

View File

@ -20,7 +20,7 @@ static void help(char** argv)
cout << "\nThis is a demo program shows how perspective transformation applied on an image, \n"
"Using OpenCV version " << CV_VERSION << endl;
cout << "\nUsage:\n" << argv[0] << " [image_name -- Default ../data/right.jpg]\n" << endl;
cout << "\nUsage:\n" << argv[0] << " [image_name -- Default data/right.jpg]\n" << endl;
cout << "\nHot keys: \n"
"\tESC, q - quit the program\n"
@ -45,9 +45,9 @@ bool validation_needed = true;
int main(int argc, char** argv)
{
help(argv);
CommandLineParser parser(argc, argv, "{@input| ../data/right.jpg |}");
CommandLineParser parser(argc, argv, "{@input| data/right.jpg |}");
string filename = parser.get<string>("@input");
string filename = samples::findFile(parser.get<string>("@input"));
Mat original_image = imread( filename );
Mat image;

View File

@ -13,7 +13,7 @@ static void help()
{
cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n"
"Usage:\n"
"./watershed [image_name -- default is ../data/fruits.jpg]\n" << endl;
"./watershed [image_name -- default is fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
@ -48,18 +48,18 @@ static void onMouse( int event, int x, int y, int flags, void* )
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{help h | | }{ @input | ../data/fruits.jpg | }");
cv::CommandLineParser parser(argc, argv, "{help h | | }{ @input | fruits.jpg | }");
if (parser.has("help"))
{
help();
return 0;
}
string filename = parser.get<string>("@input");
string filename = samples::findFile(parser.get<string>("@input"));
Mat img0 = imread(filename, 1), imgGray;
if( img0.empty() )
{
cout << "Couldn'g open image " << filename << ". Usage: watershed <image_name>\n";
cout << "Couldn't open image " << filename << ". Usage: watershed <image_name>\n";
return 0;
}
help();

View File

@ -0,0 +1,15 @@
%YAML:1.0
images:
- left01.jpg
- left02.jpg
- left03.jpg
- left04.jpg
- left05.jpg
- left06.jpg
- left07.jpg
- left08.jpg
- left09.jpg
- left11.jpg
- left12.jpg
- left13.jpg
- left14.jpg
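The stereo_calib.xml added above is an ordinary FileStorage image list; after resolving it with samples::findFile() it can be read back with a few lines of FileStorage code. A sketch under that assumption (the helper name is illustrative; the sample uses its own list-reading function):

    #include <opencv2/core.hpp>
    #include <string>
    #include <vector>

    // Read the "images" sequence from an image-list file such as stereo_calib.xml.
    static bool readImageList(const std::string& filename, std::vector<std::string>& names)
    {
        names.clear();
        cv::FileStorage fs(filename, cv::FileStorage::READ);
        if (!fs.isOpened())
            return false;
        cv::FileNode node = fs["images"];
        if (node.type() != cv::FileNode::SEQ)
            return false;
        for (cv::FileNodeIterator it = node.begin(); it != node.end(); ++it)
            names.push_back((std::string)*it);
        return true;
    }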

View File

@ -64,9 +64,9 @@ int main(int argc, char **argv)
parser.printMessage();
return 0;
}
string modelTxt = parser.get<string>("proto");
string modelBin = parser.get<string>("model");
string imageFile = parser.get<string>("image");
string modelTxt = samples::findFile(parser.get<string>("proto"));
string modelBin = samples::findFile(parser.get<string>("model"));
string imageFile = samples::findFile(parser.get<string>("image"));
bool useOpenCL = parser.has("opencl");
if (!parser.check())
{

View File

@ -86,6 +86,10 @@ def findFile(filename):
if os.path.exists(filename):
return filename
fpath = cv.samples.findFile(filename, False)
if fpath:
return fpath
samplesDataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..',
'data',

View File

@ -43,7 +43,7 @@ cv.dnn_registerLayer('Crop', CropLayer)
#! [Register]
# Load the model.
net = cv.dnn.readNet(args.prototxt, args.caffemodel)
net = cv.dnn.readNet(cv.samples.findFile(args.prototxt), cv.samples.findFile(args.caffemodel))
kWinName = 'Holistically-Nested Edge Detection'
cv.namedWindow('Input', cv.WINDOW_NORMAL)

View File

@ -13,7 +13,7 @@ parser.add_argument('--height', default=-1, type=int, help='Resize input to spec
parser.add_argument('--median_filter', default=0, type=int, help='Kernel size of postprocessing blurring.')
args = parser.parse_args()
net = cv.dnn.readNetFromTorch(args.model)
net = cv.dnn.readNetFromTorch(cv.samples.findFile(args.model))
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV);
if args.input:

View File

@ -68,13 +68,13 @@ def drawBox(frame, classId, conf, left, top, right, bottom):
# Load a network
net = cv.dnn.readNet(args.model, args.config)
net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config))
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
winName = 'Mask-RCNN in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
cap = cv.VideoCapture(args.input if args.input else 0)
cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0)
legend = None
while cv.waitKey(1) < 0:
hasFrame, frame = cap.read()

View File

@ -26,12 +26,12 @@ parser.add_argument('--annotations', help='Path to COCO annotations file.', requ
args = parser.parse_args()
### Get OpenCV predictions #####################################################
net = cv.dnn.readNetFromTensorflow(args.weights, args.prototxt)
net = cv.dnn.readNetFromTensorflow(cv.samples.findFile(args.weights), cv.samples.findFile(args.prototxt))
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV);
detections = []
for imgName in os.listdir(args.images):
inp = cv.imread(os.path.join(args.images, imgName))
inp = cv.imread(cv.samples.findFile(os.path.join(args.images, imgName)))
rows = inp.shape[0]
cols = inp.shape[1]
inp = cv.resize(inp, (300, 300))

View File

@ -67,7 +67,7 @@ if args.classes:
classes = f.read().rstrip('\n').split('\n')
# Load a network
net = cv.dnn.readNet(args.model, args.config, args.framework)
net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config), args.framework)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)
outNames = net.getUnconnectedOutLayersNames()
@ -182,7 +182,7 @@ def callback(pos):
cv.createTrackbar('Confidence threshold, %', winName, int(confThreshold * 100), 99, callback)
cap = cv.VideoCapture(args.input if args.input else 0)
cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0)
while cv.waitKey(1) < 0:
hasFrame, frame = cap.read()
if not hasFrame:

View File

@ -66,9 +66,9 @@ int main(int argc, char **argv)
"{ t threshold | 0.1 | threshold or confidence value for the heatmap }"
);
String modelTxt = parser.get<string>("proto");
String modelBin = parser.get<string>("model");
String imageFile = parser.get<String>("image");
String modelTxt = samples::findFile(parser.get<string>("proto"));
String modelBin = samples::findFile(parser.get<string>("model"));
String imageFile = samples::findFile(parser.get<String>("image"));
int W_in = parser.get<int>("width");
int H_in = parser.get<int>("height");
float thresh = parser.get<float>("threshold");

View File

@ -45,7 +45,7 @@ else:
inWidth = args.width
inHeight = args.height
net = cv.dnn.readNetFromCaffe(args.proto, args.model)
net = cv.dnn.readNetFromCaffe(cv.samples.findFile(args.proto), cv.samples.findFile(args.model))
cap = cv.VideoCapture(args.input if args.input else 0)

View File

@ -116,11 +116,11 @@ if __name__ == '__main__':
try:
fn1, fn2 = args
except:
fn1 = '../data/aero1.jpg'
fn2 = '../data/aero3.jpg'
fn1 = 'aero1.jpg'
fn2 = 'aero3.jpg'
img1 = cv.imread(fn1, 0)
img2 = cv.imread(fn2, 0)
img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE)
detector, matcher = init_feature(feature_name)
if img1 is None:

View File

@ -32,7 +32,7 @@ if __name__ == '__main__':
print()
if len(sys.argv) > 1:
fn = sys.argv[1]
fn = cv.samples.findFile(sys.argv[1])
print('loading %s ...' % fn)
img = cv.imread(fn)
if img is None:

View File

@ -53,7 +53,7 @@ if __name__ == '__main__':
obj_points = []
img_points = []
h, w = cv.imread(img_names[0], 0).shape[:2] # TODO: use imquery call to retrieve results
h, w = cv.imread(img_names[0], cv.IMREAD_GRAYSCALE).shape[:2] # TODO: use imquery call to retrieve results
def processImage(fn):
print('processing %s... ' % fn)

View File

@ -160,7 +160,7 @@ def draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal,
def main():
parser = argparse.ArgumentParser(description='Plot camera calibration extrinsics.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--calibration', type=str, default="../data/left_intrinsics.yml",
parser.add_argument('--calibration', type=str, default='left_intrinsics.yml',
help='YAML camera calibration file.')
parser.add_argument('--cam_width', type=float, default=0.064/2,
help='Width/2 of the displayed camera.')
@ -172,7 +172,7 @@ def main():
help='The calibration board is static and the camera is moving.')
args = parser.parse_args()
fs = cv.FileStorage(args.calibration, cv.FILE_STORAGE_READ)
fs = cv.FileStorage(cv.samples.findFile(args.calibration), cv.FILE_STORAGE_READ)
board_width = int(fs.getNode('board_width').real())
board_height = int(fs.getNode('board_height').real())
square_size = fs.getNode('square_size').real()

View File

@ -51,9 +51,9 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except:
fn = '../data/baboon.jpg'
fn = 'baboon.jpg'
src = cv.imread(fn)
src = cv.imread(cv.samples.findFile(fn))
def nothing(*argv):
pass

View File

@ -39,7 +39,7 @@ if __name__ == '__main__':
fn = sys.argv[1]
except:
fn = 0
cam = video.create_capture(fn, fallback='synth:bg=../data/baboon.jpg:class=chess:noise=0.05')
cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05')
while True:
flag, frame = cam.read()

View File

@ -19,11 +19,11 @@ Usage:
ESC - exit
Examples:
deconvolution.py --angle 135 --d 22 ../data/licenseplate_motion.jpg
deconvolution.py --angle 135 --d 22 licenseplate_motion.jpg
(image source: http://www.topazlabs.com/infocus/_images/licenseplate_compare.jpg)
deconvolution.py --angle 86 --d 31 ../data/text_motion.jpg
deconvolution.py --circle --d 19 ../data/text_defocus.jpg
deconvolution.py --angle 86 --d 31 text_motion.jpg
deconvolution.py --circle --d 19 text_defocus.jpg
(image source: compact digital photo camera, no artificial distortion)
@ -73,11 +73,11 @@ if __name__ == '__main__':
try:
fn = args[0]
except:
fn = '../data/licenseplate_motion.jpg'
fn = 'licenseplate_motion.jpg'
win = 'deconvolution'
img = cv.imread(fn, 0)
img = cv.imread(cv.samples.findFile(fn), cv.IMREAD_GRAYSCALE)
if img is None:
print('Failed to load file:', fn)
sys.exit(1)

View File

@ -38,8 +38,8 @@ def shift_dft(src, dst=None):
h, w = src.shape[:2]
cx1 = cx2 = w/2
cy1 = cy2 = h/2
cx1 = cx2 = w // 2
cy1 = cy2 = h // 2
# if the size is odd, then adjust the bottom/right quadrants
if w % 2 != 0:
@ -65,11 +65,13 @@ def shift_dft(src, dst=None):
if __name__ == "__main__":
if len(sys.argv) > 1:
im = cv.imread(sys.argv[1])
fname = sys.argv[1]
else:
im = cv.imread('../data/baboon.jpg')
fname = 'baboon.jpg'
print("usage : python dft.py <image_file>")
im = cv.imread(cv.samples.findFile(fname))
# convert to grayscale
im = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
h, w = im.shape[:2]

View File

@ -3,7 +3,7 @@
'''
SVM and KNearest digit recognition.
Sample loads a dataset of handwritten digits from '../data/digits.png'.
Sample loads a dataset of handwritten digits from 'digits.png'.
Then it trains a SVM and KNearest classifiers on it and evaluates
their accuracy.
@ -42,7 +42,7 @@ from common import clock, mosaic
SZ = 20 # size of each digit is SZ x SZ
CLASS_N = 10
DIGITS_FN = '../data/digits.png'
DIGITS_FN = 'digits.png'
def split2d(img, cell_size, flatten=True):
h, w = img.shape[:2]
@ -54,8 +54,9 @@ def split2d(img, cell_size, flatten=True):
return cells
def load_digits(fn):
fn = cv.samples.findFile(fn)
print('loading "%s" ...' % fn)
digits_img = cv.imread(fn, 0)
digits_img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
digits = split2d(digits_img, (SZ, SZ))
labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N)
return digits, labels

View File

@ -24,10 +24,11 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except:
fn = '../data/fruits.jpg'
fn = 'fruits.jpg'
print(__doc__)
img = cv.imread(fn, 0)
fn = cv.samples.findFile(fn)
img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
if img is None:
print('Failed to load fn:', fn)
sys.exit(1)

View File

@ -40,13 +40,13 @@ if __name__ == '__main__':
except:
video_src = 0
args = dict(args)
cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
cascade_fn = args.get('--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "data/haarcascades/haarcascade_eye.xml")
cascade = cv.CascadeClassifier(cascade_fn)
nested = cv.CascadeClassifier(nested_fn)
cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))
cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05')
cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))
while True:
ret, img = cam.read()

View File

@ -147,11 +147,11 @@ if __name__ == '__main__':
try:
fn1, fn2 = args
except:
fn1 = '../data/box.png'
fn2 = '../data/box_in_scene.png'
fn1 = 'box.png'
fn2 = 'box_in_scene.png'
img1 = cv.imread(fn1, 0)
img2 = cv.imread(fn2, 0)
img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE)
detector, matcher = init_feature(feature_name)
if img1 is None:

View File

@ -25,10 +25,10 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except:
fn = '../data/fruits.jpg'
fn = 'fruits.jpg'
print(__doc__)
img = cv.imread(fn, True)
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)

View File

@ -55,9 +55,9 @@ if __name__ == '__main__':
try:
img_fn = sys.argv[1]
except:
img_fn = '../data/baboon.jpg'
img_fn = 'baboon.jpg'
img = cv.imread(img_fn)
img = cv.imread(cv.samples.findFile(img_fn))
if img is None:
print('Failed to load image file:', img_fn)
sys.exit(1)

View File

@ -107,11 +107,11 @@ if __name__ == '__main__':
if len(sys.argv) == 2:
filename = sys.argv[1] # for drawing purposes
else:
print("No input image given, so loading default image, ../data/lena.jpg \n")
print("No input image given, so loading default image, lena.jpg \n")
print("Correct Usage: python grabcut.py <filename> \n")
filename = '../data/lena.jpg'
filename = 'lena.jpg'
img = cv.imread(filename)
img = cv.imread(cv.samples.findFile(filename))
img2 = img.copy() # a copy of original image
mask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG
output = np.zeros(img.shape,np.uint8) # output image to be shown

View File

@ -60,10 +60,10 @@ if __name__ == '__main__':
if len(sys.argv)>1:
fname = sys.argv[1]
else :
fname = '../data/lena.jpg'
fname = 'lena.jpg'
print("usage : python hist.py <image_file>")
im = cv.imread(fname)
im = cv.imread(cv.samples.findFile(fname))
if im is None:
print('Failed to load image file:', fname)

View File

@ -5,7 +5,7 @@ This example illustrates how to use cv.HoughCircles() function.
Usage:
houghcircles.py [<image_name>]
image argument defaults to ../data/board.jpg
image argument defaults to board.jpg
'''
# Python 2/3 compatibility
@ -21,9 +21,9 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except IndexError:
fn = "../data/board.jpg"
fn = 'board.jpg'
src = cv.imread(fn, 1)
src = cv.imread(cv.samples.findFile(fn))
img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
img = cv.medianBlur(img, 5)
cimg = src.copy() # numpy function

View File

@ -5,7 +5,7 @@ This example illustrates how to use Hough Transform to find lines
Usage:
houghlines.py [<image_name>]
image argument defaults to ../data/pic1.png
image argument defaults to pic1.png
'''
# Python 2/3 compatibility
@ -22,9 +22,9 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except IndexError:
fn = "../data/pic1.png"
fn = 'pic1.png'
src = cv.imread(fn)
src = cv.imread(cv.samples.findFile(fn))
dst = cv.Canny(src, 50, 200)
cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)

View File

@ -27,11 +27,11 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except:
fn = '../data/fruits.jpg'
fn = 'fruits.jpg'
print(__doc__)
img = cv.imread(fn)
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)

View File

@ -158,10 +158,12 @@ if __name__ == '__main__':
args, dummy = getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save='])
args = dict(args)
args.setdefault('--model', 'svm')
args.setdefault('--data', '../data/letter-recognition.data')
args.setdefault('--data', 'letter-recognition.data')
print('loading data %s ...' % args['--data'])
samples, responses = load_base(args['--data'])
datafile = cv.samples.findFile(args['--data'])
print('loading data %s ...' % datafile)
samples, responses = load_base(datafile)
Model = models[args['--model']]
model = Model()

View File

@ -22,9 +22,9 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except IndexError:
fn = '../data/fruits.jpg'
fn = 'fruits.jpg'
img = cv.imread(fn)
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)

View File

@ -31,9 +31,9 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except:
fn = '../data/baboon.jpg'
fn = 'baboon.jpg'
img = cv.imread(fn)
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)

View File

@ -40,7 +40,7 @@ if __name__ == '__main__':
hog = cv.HOGDescriptor()
hog.setSVMDetector( cv.HOGDescriptor_getDefaultPeopleDetector() )
default = ['../data/basketball2.png '] if len(sys.argv[1:]) == 0 else []
default = [cv.samples.findFile('basketball2.png')] if len(sys.argv[1:]) == 0 else []
for fn in it.chain(*map(glob, default + sys.argv[1:])):
print(fn, ' - ',)

View File

@ -35,8 +35,8 @@ def write_ply(fn, verts, colors):
if __name__ == '__main__':
print('loading images...')
imgL = cv.pyrDown( cv.imread('../data/aloeL.jpg') ) # downscale images for faster processing
imgR = cv.pyrDown( cv.imread('../data/aloeR.jpg') )
imgL = cv.pyrDown(cv.imread(cv.samples.findFile('aloeL.jpg'))) # downscale images for faster processing
imgR = cv.pyrDown(cv.imread(cv.samples.findFile('aloeR.jpg')))
# disparity range is tuned for 'aloe' image pair
window_size = 3

View File

@ -21,9 +21,9 @@ if __name__ == '__main__':
try:
fn = sys.argv[1]
except:
fn = '../data/starry_night.jpg'
fn = 'starry_night.jpg'
img = cv.imread(fn)
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)

View File

@ -98,8 +98,8 @@ class TestSceneRender():
if __name__ == '__main__':
backGr = cv.imread('../data/graf1.png')
fgr = cv.imread('../data/box.png')
backGr = cv.imread(cv.samples.findFile('graf1.png'))
fgr = cv.imread(cv.samples.findFile('box.png'))
render = TestSceneRender(backGr, fgr)

View File

@ -6,10 +6,10 @@ import argparse
## [Load image]
parser = argparse.ArgumentParser(description='Code for Histogram Calculation tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()
src = cv.imread(args.input)
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)

View File

@ -4,10 +4,10 @@ import argparse
## [Load image]
parser = argparse.ArgumentParser(description='Code for Histogram Equalization tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/lena.jpg')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()
src = cv.imread(args.input)
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)

View File

@ -11,15 +11,15 @@ def main(argv):
window_name = 'filter2D Demo'
## [load]
imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg"
imageName = argv[0] if len(argv) > 0 else 'lena.jpg'
# Loads an image
src = cv.imread(imageName, cv.IMREAD_COLOR)
src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR)
# Check if image is loaded fine
if src is None:
print ('Error opening image!')
print ('Usage: filter2D.py [image_name -- default ../data/lena.jpg] \n')
print ('Usage: filter2D.py [image_name -- default lena.jpg] \n')
return -1
## [load]
## [init_arguments]

View File

@ -5,11 +5,11 @@ import numpy as np
def main(argv):
## [load]
default_file = "../../../../data/smarties.png"
default_file = 'smarties.png'
filename = argv[0] if len(argv) > 0 else default_file
# Loads an image
src = cv.imread(filename, cv.IMREAD_COLOR)
src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_COLOR)
# Check if image is loaded fine
if src is None:

Some files were not shown because too many files have changed in this diff.