Merge pull request #24773 from tailsu:sd/pathlike

python: accept path-like objects wherever file names are expected #24773

Merry Christmas, all 🎄

Implements #15731

Path-like support is enabled for every argument named `filename` or `filepath` (case-insensitive), as well as for any argument annotated with `CV_WRAP_FILE_PATH`.

Support is implemented on top of `PyOS_FSPath`, which is available in Python 3.6+. On older Python versions these arguments must still be passed as `str`, as before.
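
For example, with this change the following is expected to work on Python 3.6+ (a minimal sketch; the image and model paths below are placeholders, not files shipped with OpenCV):

```python
from pathlib import Path
import cv2 as cv

# pathlib.Path (or any os.PathLike object) is accepted wherever a file name is expected
img = cv.imread(Path("data") / "lena.jpg")              # placeholder image path
net = cv.dnn.readNet(Path("models") / "net.weights",    # placeholder model files
                     Path("models") / "net.cfg")

# plain str arguments keep working exactly as before
img = cv.imread("data/lena.jpg")
```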

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake
Stefan Dragnev 2024-01-12 14:23:05 +01:00 committed by GitHub
parent a7fa1e6f4b
commit 2791bb7062
9 changed files with 92 additions and 26 deletions


@@ -475,6 +475,8 @@ Cv64suf;
#define CV_WRAP_MAPPABLE(mappable)
#define CV_WRAP_PHANTOM(phantom_header)
#define CV_WRAP_DEFAULT(val)
/* Indicates that the function parameter has filesystem path semantic */
#define CV_WRAP_FILE_PATH
/****************************************************************************************\
* Matrix type (Mat) *


@@ -484,7 +484,7 @@ CV__DNN_INLINE_NS_BEGIN
* Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
* backend.
*/
CV_WRAP static Net readFromModelOptimizer(const String& xml, const String& bin);
CV_WRAP static Net readFromModelOptimizer(CV_WRAP_FILE_PATH const String& xml, CV_WRAP_FILE_PATH const String& bin);
/** @brief Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR).
* @param[in] bufferModelConfig buffer with model's configuration.
@@ -517,7 +517,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param path path to output file with .dot extension
* @see dump()
*/
CV_WRAP void dumpToFile(const String& path);
CV_WRAP void dumpToFile(CV_WRAP_FILE_PATH const String& path);
/** @brief Adds new layer to the net.
* @param name unique name of the adding layer.
* @param type typename of the adding layer (type must be registered in LayerRegister).
@@ -890,7 +890,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param darknetModel path to the .weights file with learned network.
* @returns Network object that ready to do forward, throw an exception in failure cases.
*/
CV_EXPORTS_W Net readNetFromDarknet(const String &cfgFile, const String &darknetModel = String());
CV_EXPORTS_W Net readNetFromDarknet(CV_WRAP_FILE_PATH const String &cfgFile, CV_WRAP_FILE_PATH const String &darknetModel = String());
/** @brief Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
* @param bufferCfg A buffer contains a content of .cfg file with text description of the network architecture.
@@ -915,7 +915,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param caffeModel path to the .caffemodel file with learned network.
* @returns Net object.
*/
CV_EXPORTS_W Net readNetFromCaffe(const String &prototxt, const String &caffeModel = String());
CV_EXPORTS_W Net readNetFromCaffe(CV_WRAP_FILE_PATH const String &prototxt, CV_WRAP_FILE_PATH const String &caffeModel = String());
/** @brief Reads a network model stored in Caffe model in memory.
* @param bufferProto buffer containing the content of the .prototxt file
@@ -944,7 +944,7 @@ CV__DNN_INLINE_NS_BEGIN
* let us make it more flexible.
* @returns Net object.
*/
CV_EXPORTS_W Net readNetFromTensorflow(const String &model, const String &config = String());
CV_EXPORTS_W Net readNetFromTensorflow(CV_WRAP_FILE_PATH const String &model, CV_WRAP_FILE_PATH const String &config = String());
/** @brief Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
* @param bufferModel buffer containing the content of the pb file
@@ -969,7 +969,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param model path to the .tflite file with binary flatbuffers description of the network architecture
* @returns Net object.
*/
CV_EXPORTS_W Net readNetFromTFLite(const String &model);
CV_EXPORTS_W Net readNetFromTFLite(CV_WRAP_FILE_PATH const String &model);
/** @brief Reads a network model stored in <a href="https://www.tensorflow.org/lite">TFLite</a> framework's format.
* @param bufferModel buffer containing the content of the tflite file
@@ -1011,7 +1011,7 @@ CV__DNN_INLINE_NS_BEGIN
*
* Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
*/
CV_EXPORTS_W Net readNetFromTorch(const String &model, bool isBinary = true, bool evaluate = true);
CV_EXPORTS_W Net readNetFromTorch(CV_WRAP_FILE_PATH const String &model, bool isBinary = true, bool evaluate = true);
/**
* @brief Read deep learning network represented in one of the supported formats.
@@ -1037,7 +1037,7 @@ CV__DNN_INLINE_NS_BEGIN
* @ref readNetFromTorch or @ref readNetFromDarknet. An order of @p model and @p config
* arguments does not matter.
*/
CV_EXPORTS_W Net readNet(const String& model, const String& config = "", const String& framework = "");
CV_EXPORTS_W Net readNet(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config = "", const String& framework = "");
/**
* @brief Read deep learning network represented in one of the supported formats.
@@ -1064,7 +1064,7 @@ CV__DNN_INLINE_NS_BEGIN
* backend.
*/
CV_EXPORTS_W
Net readNetFromModelOptimizer(const String &xml, const String &bin = "");
Net readNetFromModelOptimizer(CV_WRAP_FILE_PATH const String &xml, CV_WRAP_FILE_PATH const String &bin = "");
/** @brief Load a network from Intel's Model Optimizer intermediate representation.
* @param[in] bufferModelConfig Buffer contains XML configuration with network's topology.
@@ -1093,7 +1093,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param onnxFile path to the .onnx file with text description of the network architecture.
* @returns Network object that ready to do forward, throw an exception in failure cases.
*/
CV_EXPORTS_W Net readNetFromONNX(const String &onnxFile);
CV_EXPORTS_W Net readNetFromONNX(CV_WRAP_FILE_PATH const String &onnxFile);
/** @brief Reads a network model from <a href="https://onnx.ai/">ONNX</a>
* in-memory buffer.
@@ -1116,7 +1116,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param path to the .pb file with input tensor.
* @returns Mat.
*/
CV_EXPORTS_W Mat readTensorFromONNX(const String& path);
CV_EXPORTS_W Mat readTensorFromONNX(CV_WRAP_FILE_PATH const String& path);
/** @brief Creates 4-dimensional blob from image. Optionally resizes and crops @p image from center,
* subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels.
@@ -1289,7 +1289,7 @@ CV__DNN_INLINE_NS_BEGIN
* is taken from NVidia's Caffe fork: https://github.com/NVIDIA/caffe.
* So the resulting model may be used there.
*/
CV_EXPORTS_W void shrinkCaffeModel(const String& src, const String& dst,
CV_EXPORTS_W void shrinkCaffeModel(CV_WRAP_FILE_PATH const String& src, CV_WRAP_FILE_PATH const String& dst,
const std::vector<String>& layersTypes = std::vector<String>());
/** @brief Create a text representation for a binary network stored in protocol buffer format.
@@ -1298,7 +1298,7 @@ CV__DNN_INLINE_NS_BEGIN
*
* @note To reduce output file size, trained weights are not included.
*/
CV_EXPORTS_W void writeTextGraph(const String& model, const String& output);
CV_EXPORTS_W void writeTextGraph(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& output);
/** @brief Performs non maximum suppression given boxes and corresponding scores.
@@ -1403,7 +1403,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param[in] model Binary file contains trained weights.
* @param[in] config Text file contains network configuration.
*/
CV_WRAP Model(const String& model, const String& config = "");
CV_WRAP Model(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config = "");
/**
* @brief Create model from deep learning network.
@@ -1508,7 +1508,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param[in] model Binary file contains trained weights.
* @param[in] config Text file contains network configuration.
*/
CV_WRAP ClassificationModel(const String& model, const String& config = "");
CV_WRAP ClassificationModel(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config = "");
/**
* @brief Create model from deep learning network.
@@ -1558,7 +1558,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param[in] model Binary file contains trained weights.
* @param[in] config Text file contains network configuration.
*/
CV_WRAP KeypointsModel(const String& model, const String& config = "");
CV_WRAP KeypointsModel(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config = "");
/**
* @brief Create model from deep learning network.
@@ -1590,7 +1590,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param[in] model Binary file contains trained weights.
* @param[in] config Text file contains network configuration.
*/
CV_WRAP SegmentationModel(const String& model, const String& config = "");
CV_WRAP SegmentationModel(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config = "");
/**
* @brief Create model from deep learning network.
@@ -1621,7 +1621,7 @@ CV__DNN_INLINE_NS_BEGIN
* @param[in] model Binary file contains trained weights.
* @param[in] config Text file contains network configuration.
*/
CV_WRAP DetectionModel(const String& model, const String& config = "");
CV_WRAP DetectionModel(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config = "");
/**
* @brief Create model from deep learning network.
@@ -1687,7 +1687,7 @@ public:
* @param[in] config Text file contains network configuration
*/
CV_WRAP inline
TextRecognitionModel(const std::string& model, const std::string& config = "")
TextRecognitionModel(CV_WRAP_FILE_PATH const std::string& model, CV_WRAP_FILE_PATH const std::string& config = "")
: TextRecognitionModel(readNet(model, config)) { /* nothing */ }
/**
@@ -1842,7 +1842,7 @@ public:
* @param[in] config Text file contains network configuration.
*/
CV_WRAP inline
TextDetectionModel_EAST(const std::string& model, const std::string& config = "")
TextDetectionModel_EAST(CV_WRAP_FILE_PATH const std::string& model, CV_WRAP_FILE_PATH const std::string& config = "")
: TextDetectionModel_EAST(readNet(model, config)) { /* nothing */ }
/**
@@ -1903,7 +1903,7 @@ public:
* @param[in] config Text file contains network configuration.
*/
CV_WRAP inline
TextDetectionModel_DB(const std::string& model, const std::string& config = "")
TextDetectionModel_DB(CV_WRAP_FILE_PATH const std::string& model, CV_WRAP_FILE_PATH const std::string& config = "")
: TextDetectionModel_DB(readNet(model, config)) { /* nothing */ }
CV_WRAP TextDetectionModel_DB& setBinaryThreshold(float binaryThreshold);


@@ -27,7 +27,7 @@ public:
* @param prototxt_path prototxt file path for the super resolution model
* @param model_path model file path for the super resolution model
*/
CV_WRAP BarcodeDetector(const std::string &prototxt_path, const std::string &model_path);
CV_WRAP BarcodeDetector(CV_WRAP_FILE_PATH const std::string &prototxt_path, CV_WRAP_FILE_PATH const std::string &model_path);
~BarcodeDetector();
/** @brief Decodes barcode in image once it's found by the detect() method.


@@ -82,8 +82,8 @@ public:
* @param backend_id the id of backend
* @param target_id the id of target device
*/
CV_WRAP static Ptr<FaceDetectorYN> create(const String& model,
const String& config,
CV_WRAP static Ptr<FaceDetectorYN> create(CV_WRAP_FILE_PATH const String& model,
CV_WRAP_FILE_PATH const String& config,
const Size& input_size,
float score_threshold = 0.9f,
float nms_threshold = 0.3f,
@@ -154,7 +154,7 @@ public:
* @param backend_id the id of backend
* @param target_id the id of target device
*/
CV_WRAP static Ptr<FaceRecognizerSF> create(const String& model, const String& config, int backend_id = 0, int target_id = 0);
CV_WRAP static Ptr<FaceRecognizerSF> create(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config, int backend_id = 0, int target_id = 0);
};
//! @}


@@ -45,17 +45,20 @@ class ArgInfo
private:
    static const uint32_t arg_outputarg_flag = 0x1;
    static const uint32_t arg_arithm_op_src_flag = 0x2;
    static const uint32_t arg_pathlike_flag = 0x4;

public:
    const char* name;
    bool outputarg;
    bool arithm_op_src;
    bool pathlike;
    // more fields may be added if necessary

    ArgInfo(const char* name_, uint32_t arg_) :
        name(name_),
        outputarg((arg_ & arg_outputarg_flag) != 0),
        arithm_op_src((arg_ & arg_arithm_op_src_flag) != 0),
        pathlike((arg_ & arg_pathlike_flag) != 0) {}

private:
    ArgInfo(const ArgInfo&) = delete;


@@ -701,6 +701,18 @@ bool pyopencv_to(PyObject* obj, String &value, const ArgInfo& info)
        return true;
    }
    std::string str;
#if ((PY_VERSION_HEX >= 0x03060000) && !defined(Py_LIMITED_API)) || (Py_LIMITED_API >= 0x03060000)
    if (info.pathlike)
    {
        obj = PyOS_FSPath(obj);
        if (PyErr_Occurred())
        {
            failmsg("Expected '%s' to be a str or path-like object", info.name);
            return false;
        }
    }
#endif
    if (getUnicodeString(obj, str))
    {
        value = str;
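
`PyOS_FSPath` is the C-API counterpart of Python's `os.fspath()`: `str` (and `bytes`) objects pass through unchanged, `os.PathLike` objects have their `__fspath__()` method called, and anything else raises `TypeError`, which the binding reports with the "str or path-like object" message above. A small Python-level sketch of that behaviour (the `MyPath` class is purely illustrative):

```python
import os
from pathlib import Path

class MyPath:
    """Illustrative os.PathLike implementation."""
    def __fspath__(self):
        return "/tmp/example.onnx"

print(os.fspath("model.onnx"))        # 'model.onnx'  -> str passes through
print(os.fspath(Path("model.onnx")))  # 'model.onnx'  -> __fspath__() result
print(os.fspath(MyPath()))            # '/tmp/example.onnx'

try:
    os.fspath(123)                    # neither str, bytes, nor os.PathLike
except TypeError as err:
    print(err)
```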


@@ -500,6 +500,10 @@ class ArgInfo(object):
    def outputarg(self):
        return '/O' in self._modifiers or '/IO' in self._modifiers

    @property
    def pathlike(self):
        return '/PATH' in self._modifiers

    @property
    def returnarg(self):
        return self.outputarg
@@ -523,6 +527,7 @@ class ArgInfo(object):
    def crepr(self):
        arg = 0x01 if self.outputarg else 0x0
        arg += 0x02 if self.arithm_op_src_arg else 0x0
        arg += 0x04 if self.pathlike else 0x0
        return "ArgInfo(\"%s\", %d)" % (self.name, arg)


@@ -90,6 +90,10 @@ class CppHeaderParser(object):
            modlist.append("/IO")
            arg_str = arg_str.replace("CV_IN_OUT", "")

        if "CV_WRAP_FILE_PATH" in arg_str:
            modlist.append("/PATH")
            arg_str = arg_str.replace("CV_WRAP_FILE_PATH", "")

        isarray = False
        npos = arg_str.find("CV_CARRAY")
        if npos >= 0:
@@ -627,6 +631,8 @@ class CppHeaderParser(object):
                    ("noArray", arg_type)]).strip()
                if '/IO' in modlist and '/O' in modlist:
                    modlist.remove('/O')
                if (arg_name.lower() == 'filename' or arg_name.lower() == 'filepath') and '/PATH' not in modlist:
                    modlist.append('/PATH')
                args.append([arg_type, arg_name, defval, modlist])
                npos = arg_start-1
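
Because of this name-based fallback, functions whose C++ parameter is literally named `filename` or `filepath` gain path-like support without any header annotation; `cv::imread` and `cv::imwrite` are covered this way. A minimal sketch (the output path is a placeholder):

```python
from pathlib import Path
import numpy as np
import cv2 as cv

out = Path("black.png")                               # placeholder output path
cv.imwrite(out, np.zeros((8, 8, 3), dtype=np.uint8))  # C++ parameter is named 'filename'
print(cv.imread(out).shape)                           # (8, 8, 3)
```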


@@ -0,0 +1,38 @@
from tests_common import NewOpenCVTests, unittest

import cv2 as cv
import os

def import_path():
    import sys
    if sys.version_info[0] < 3 or sys.version_info[1] < 6:
        raise unittest.SkipTest('Python 3.6+ required')

    from pathlib import Path
    return Path

class CanPassPathLike(NewOpenCVTests):

    def test_pathlib_path(self):
        Path = import_path()

        img_path = self.find_file('cv/imgproc/stuff.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])

        image_from_str = cv.imread(img_path)
        self.assertIsNotNone(image_from_str)

        image_from_path = cv.imread(Path(img_path))
        self.assertIsNotNone(image_from_path)

    def test_type_mismatch(self):
        import_path() # checks python version

        with self.assertRaises(TypeError) as context:
            cv.imread(123)

        self.assertTrue('str or path-like' in str(context.exception))

if __name__ == '__main__':
    NewOpenCVTests.bootstrap()