Mirror of https://github.com/opencv/opencv.git, synced 2025-06-08 01:53:19 +08:00
Merge pull request #21775 from luzpaz:typos/gapi

* GAPI: fix various gapi related typos
  Fixes source comments and documentation related to gapi code.
* Fix source typos
* Fixed typos requested
* Follow-up typo fix

parent 03c9648f2e
commit 554d08c3a1
@@ -592,7 +592,7 @@ subgraph cluster_3 {style=filled;color=azure2; C};
 
 *** But how does it run?
 
-- Since ~infer~ is an *Operation*, backends may provide *Kernels* implenting it;
+- Since ~infer~ is an *Operation*, backends may provide *Kernels* implementing it;
 - The only publicly available inference backend now is *OpenVINO™*:
 - Brings its ~infer~ kernel atop of the Inference Engine;
 - NN model data is passed through G-API compile arguments (like kernels);
@@ -381,7 +381,7 @@ public:
 *
 * @note The value of `cv::GArray<T>` may be overwritten by assigning some
 * other `cv::GArray<T>` to the object using `operator=` -- on the
-* assigment, the old association or value is discarded.
+* assignment, the old association or value is discarded.
 *
 * @param v a std::vector<T> to associate with this
 * `cv::GArray<T>` object. Vector data is copied into the
@@ -48,7 +48,7 @@ struct GOrigin;
 * operations like BGR(), Y(), UV() -- these operations provide
 * access to frame's data in the familiar cv::GMat form, which can be
 * used with the majority of the existing G-API operations. These
-* accessor functions may perform color space converion on the fly if
+* accessor functions may perform color space conversion on the fly if
 * the image format of the GFrame they are applied to differs from the
 * operation's semantic (e.g. the BGR() accessor is called on an NV12
 * image frame).
@@ -63,7 +63,7 @@ public:
 *
 * @note The value of GScalar may be overwritten by assigning some
 * other GScalar to the object using `operator=` -- on the
-* assigment, the old GScalar value is discarded.
+* assignment, the old GScalar value is discarded.
 *
 * @param s a cv::Scalar value to associate with this GScalar object.
 */
@@ -225,7 +225,7 @@ public:
 * setSource() to run the graph on a new video stream.
 *
 * @overload
-* @param args arguments used to contruct and initialize a stream
+* @param args arguments used to construct and initialize a stream
 * source.
 */
 template<typename T, typename... Args>
@@ -204,7 +204,7 @@ namespace detail
 {
 static_assert(!(cv::detail::has_gshape<GTypeTraits<U>>::value
 || cv::detail::contains<typename std::decay<U>::type, GAPI_OWN_TYPES_LIST>::value),
-"gin/gout must not be used with G* classses or cv::gapi::own::*");
+"gin/gout must not be used with G* classes or cv::gapi::own::*");
 return GTypeTraits<T>::wrap_out(u);
 }
 };
@@ -1241,7 +1241,7 @@ or column if there are N channels, or have N columns if there is a single channe
 @param src Input set of 2D points stored in one of possible containers: Mat,
 std::vector<cv::Point2i>, std::vector<cv::Point2f>, std::vector<cv::Point2d>.
 @param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
-and @ref DIST_C are not suppored.
+and @ref DIST_C are not supported.
 @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
 is chosen.
 @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
@@ -1313,7 +1313,7 @@ or column if there are N channels, or have N columns if there is a single channe
 @param src Input set of 3D points stored in one of possible containers: Mat,
 std::vector<cv::Point3i>, std::vector<cv::Point3f>, std::vector<cv::Point3d>.
 @param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
-and @ref DIST_C are not suppored.
+and @ref DIST_C are not supported.
 @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
 is chosen.
 @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
@@ -126,7 +126,7 @@ public:
 
 The function is used to associate data of graph outputs with output layers of
 network topology. If a network has only one output layer, there is no need to call it
-as the layer is associated with ouput automatically but this doesn't prevent
+as the layer is associated with output automatically but this doesn't prevent
 you from doing it yourself. Count of names has to match to number of network
 outputs or you can set your own output but for this case you have to
 additionally use @ref cfgPostProc function.
@@ -95,7 +95,7 @@ GAPI_EXPORTS_W GArray<Rect> parseSSD(const GMat& in,
 /** @brief Parses output of Yolo network.
 
 Extracts detection information (box, confidence, label) from Yolo output,
-filters it by given confidence and performs non-maximum supression for overlapping boxes.
+filters it by given confidence and performs non-maximum suppression for overlapping boxes.
 
 @note Function textual ID is "org.opencv.nn.parsers.parseYolo"
 
@@ -105,7 +105,7 @@ where num_classes - a number of classes Yolo network was trained with.
 @param inSz Size to project detected boxes to (size of the input image).
 @param confidenceThreshold If confidence of the
 detection is smaller than confidence threshold, detection is rejected.
-@param nmsThreshold Non-maximum supression threshold which controls minimum
+@param nmsThreshold Non-maximum suppression threshold which controls minimum
 relative box intersection area required for rejecting the box with a smaller confidence.
 If 1.f, nms is not performed and no boxes are rejected.
 @param anchors Anchors Yolo network was trained with.
@@ -27,7 +27,7 @@ struct EncoderConfig {
 */
 enum class Profile: int { H264_BASELINE, H264_HIGH, H264_MAIN, H265_MAIN, MJPEG };
 /**
-* Specifies prefered bitrate (kb) of compressed output bitstream
+* Specifies preferred bitrate (kb) of compressed output bitstream
 */
 std::int32_t bitrate = 8000;
 /**
@@ -241,7 +241,7 @@ GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Scalar &s);
 GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Mat &m);
 GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Mat &m);
 
-// FIXME: for GRunArgs serailization
+// FIXME: for GRunArgs serialization
 #if !defined(GAPI_STANDALONE)
 GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::UMat & um);
 GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::UMat & um);
@@ -67,7 +67,7 @@ G desync(const G &g) {
 * always produce their full output vectors.
 *
 * This operation only makes sense when a GComputation is compiled in
-* straming mode with cv::GComputation::compileStreaming(). If this
+* streaming mode with cv::GComputation::compileStreaming(). If this
 * operation is used and there are desynchronized outputs, the user
 * should use a special version of cv::GStreamingCompiled::pull()
 * which produces an array of cv::util::optional<> objects.
@@ -22,14 +22,14 @@ namespace onevpl {
 /**
 * @brief Public class is using for creation of onevpl::GSource instances.
 *
-* Class members availaible through methods @ref CfgParam::get_name() and @ref CfgParam::get_value() are used by
+* Class members available through methods @ref CfgParam::get_name() and @ref CfgParam::get_value() are used by
 * onevpl::GSource inner logic to create or find oneVPL particular implementation
 * (software/hardware, specific API version and etc.).
 *
 * @note Because oneVPL may provide several implementations which are satisfying with multiple (or single one) @ref CfgParam
 * criteria therefore it is possible to configure `preferred` parameters. This kind of CfgParams are created
 * using `is_major = false` argument in @ref CfgParam::create method and are not used by creating oneVPL particular implementations.
-* Instead they fill out a "score table" to select preferrable implementation from available list. Implementation are satisfying
+* Instead they fill out a "score table" to select preferable implementation from available list. Implementation are satisfying
 * with most of these optional params would be chosen.
 * If no one optional CfgParam params were present then first of available oneVPL implementation would be applied.
 * Please get on https://spec.oneapi.io/versions/latest/elements/oneVPL/source/API_ref/VPL_disp_api_func.html?highlight=mfxcreateconfig#mfxsetconfigfilterproperty
@@ -27,7 +27,7 @@ private:
 };
 
 struct GAPI_EXPORTS DataProviderSystemErrorException final : public DataProviderException {
-DataProviderSystemErrorException(int error_code, const std::string& desription = std::string());
+DataProviderSystemErrorException(int error_code, const std::string& description = std::string());
 ~DataProviderSystemErrorException() = default;
 };
 
@@ -218,7 +218,7 @@ def op(op_id, in_types, out_types):
 
 for i, t in enumerate(out_types):
 if t not in [cv.GMat, cv.GScalar, *garray_types, *gopaque_types]:
-raise Exception('{} unsupported output type: {} in possition: {}'
+raise Exception('{} unsupported output type: {} in position: {}'
 .format(cls.__name__, t.__name__, i))
 
 def on(*args):
@@ -701,7 +701,7 @@ static cv::GRunArgs run_py_kernel(cv::detail::PyObjectHolder kernel,
 PyErr_Clear();
 throw std::logic_error("Python kernel failed with error!");
 }
-// NB: In fact it's impossible situation, becase errors were handled above.
+// NB: In fact it's impossible situation, because errors were handled above.
 GAPI_Assert(result.get() && "Python kernel returned NULL!");
 
 if (out_info.size() == 1)
@@ -811,7 +811,7 @@ static GMetaArgs run_py_meta(cv::detail::PyObjectHolder out_meta,
 PyErr_Clear();
 throw std::logic_error("Python outMeta failed with error!");
 }
-// NB: In fact it's impossible situation, becase errors were handled above.
+// NB: In fact it's impossible situation, because errors were handled above.
 GAPI_Assert(result.get() && "Python outMeta returned NULL!");
 
 out_metas = PyTuple_Check(result.get()) ? get_meta_args(result.get())
@@ -187,7 +187,7 @@ try:
 blockSize=block_sz,
 useHarrisDetector=use_harris_detector, k=k)
 # NB: The operation output is cv::GArray<cv::Pointf>, so it should be mapped
-# to python paramaters like this: [(1.2, 3.4), (5.2, 3.2)], because the cv::Point2f
+# to python parameters like this: [(1.2, 3.4), (5.2, 3.2)], because the cv::Point2f
 # according to opencv rules mapped to the tuple and cv::GArray<> mapped to the list.
 # OpenCV returns np.array with shape (n_features, 1, 2), so let's to convert it to list
 # tuples with size == n_features.
@@ -203,7 +203,7 @@ try:
 
 @staticmethod
 def outMeta(desc):
-raise NotImplementedError("outMeta isn't imlemented")
+raise NotImplementedError("outMeta isn't implemented")
 return Op
 
 
@@ -605,7 +605,7 @@ try:
 img1 = np.array([1, 2, 3])
 
 # FIXME: Cause Bad variant access.
-# Need to provide more descriptive error messsage.
+# Need to provide more descriptive error message.
 with self.assertRaises(Exception): comp.apply(cv.gin(img0, img1),
 args=cv.gapi.compile_args(
 cv.gapi.kernels(GAddImpl)))
@@ -454,7 +454,7 @@ try:
 else:
 raise unittest.SkipTest(str(e))
 except SystemError as e:
-raise unittest.SkipTest(str(e) + ", casued by " + str(e.__cause__))
+raise unittest.SkipTest(str(e) + ", caused by " + str(e.__cause__))
 
 
 def test_gst_multiple_sources(self):
@@ -33,7 +33,7 @@ const std::string keys =
 "{ thrr | 0.7 | MTCNN R confidence threshold}"
 "{ thro | 0.7 | MTCNN O confidence threshold}"
 "{ half_scale | false | MTCNN P use half scale pyramid}"
-"{ queue_capacity | 1 | Streaming executor queue capacity. Calculated automaticaly if 0}"
+"{ queue_capacity | 1 | Streaming executor queue capacity. Calculated automatically if 0}"
 ;
 
 namespace {
@@ -488,8 +488,8 @@ static inline std::string get_pnet_level_name(const cv::Size &in_size) {
 }
 
 int calculate_scales(const cv::Size &input_size, std::vector<double> &out_scales, std::vector<cv::Size> &out_sizes ) {
-//calculate multi - scale and limit the maxinum side to 1000
-//pr_scale: limit the maxinum side to 1000, < 1.0
+//calculate multi - scale and limit the maximum side to 1000
+//pr_scale: limit the maximum side to 1000, < 1.0
 double pr_scale = 1.0;
 double h = static_cast<double>(input_size.height);
 double w = static_cast<double>(input_size.width);
@@ -602,7 +602,7 @@ int main(int argc, char* argv[]) {
 cv::GArray<custom::Face> final_p_faces_for_bb2squares = custom::ApplyRegression::on(faces0, true);
 cv::GArray<custom::Face> final_faces_pnet0 = custom::BBoxesToSquares::on(final_p_faces_for_bb2squares);
 total_faces[0] = custom::RunNMS::on(final_faces_pnet0, 0.5f, false);
-//The rest PNet pyramid layers to accumlate all layers result in total_faces[PYRAMID_LEVELS - 1]]
+//The rest PNet pyramid layers to accumulate all layers result in total_faces[PYRAMID_LEVELS - 1]]
 for (int i = 1; i < pyramid_levels; ++i)
 {
 std::tie(regressions[i], scores[i]) = run_mtcnn_p(in_transposedRGB, get_pnet_level_name(level_size[i]));
@@ -150,7 +150,7 @@ int main(int argc, char *argv[])
 auto networks = cv::gapi::networks(face_net);
 
 // Now build the graph. The graph structure may vary
-// pased on the input parameters
+// passed on the input parameters
 cv::GStreamingCompiled pipeline;
 auto inputs = cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input));
 
@@ -43,7 +43,7 @@ const std::string keys =
 "{ facem | face-detection-adas-0001.xml | Path to OpenVINO IE face detection model (.xml) }"
 "{ faced | AUTO | Target device for face detection model (e.g. AUTO, GPU, VPU, ...) }"
 "{ cfg_params | <prop name>:<value>;<prop name>:<value> | Semicolon separated list of oneVPL mfxVariants which is used for configuring source (see `MFXSetConfigFilterProperty` by https://spec.oneapi.io/versions/latest/elements/oneVPL/source/index.html) }"
-"{ streaming_queue_capacity | 1 | Streaming executor queue capacity. Calculated automaticaly if 0 }"
+"{ streaming_queue_capacity | 1 | Streaming executor queue capacity. Calculated automatically if 0 }"
 "{ frames_pool_size | 0 | OneVPL source applies this parameter as preallocated frames pool size}"
 "{ vpp_frames_pool_size | 0 | OneVPL source applies this parameter as preallocated frames pool size for VPP preprocessing results}"
 "{ roi | -1,-1,-1,-1 | Region of interest (ROI) to use for inference. Identified automatically when not set }";
@@ -281,7 +281,7 @@ int main(int argc, char *argv[]) {
 const auto source_vpp_queue_capacity = cmd.get<uint32_t>("vpp_frames_pool_size");
 const auto device_id = cmd.get<std::string>("faced");
 
-// check ouput file extension
+// check output file extension
 if (!output.empty()) {
 auto ext = output.find_last_of(".");
 if (ext == std::string::npos || (output.substr(ext + 1) != "avi")) {
@@ -421,7 +421,7 @@ int main(int argc, char *argv[]) {
 } else {
 cap = cv::gapi::wip::make_onevpl_src(file_path, source_cfgs);
 }
-std::cout << "oneVPL source desription: " << cap->descr_of() << std::endl;
+std::cout << "oneVPL source description: " << cap->descr_of() << std::endl;
 } catch (const std::exception& ex) {
 std::cerr << "Cannot create source: " << ex.what() << std::endl;
 return -1;
@@ -70,7 +70,7 @@ bool DummySource::pull(cv::gapi::wip::Data& data) {
 * update current seq_id correspondingly.
 *
 * if drop_frames is enabled, wait for the next tick, otherwise
-* return last writen frame (+2 at the picture above) immediately.
+* return last written frame (+2 at the picture above) immediately.
 */
 int64_t num_frames =
 static_cast<int64_t>((curr_ts - m_next_tick_ts) / m_latency);
@@ -39,7 +39,7 @@ cv::GMat cv::gapi::streaming::desync(const cv::GMat &g) {
 //
 // At the same time, generally, every island in the streaming
 // graph gets its individual input as a queue (so normally, a
-// writer pushes the same output MULTIPLE TIMES if it has mutliple
+// writer pushes the same output MULTIPLE TIMES if it has multiple
 // readers):
 //
 // LWV
@@ -173,7 +173,7 @@ namespace magazine
 // without utilizing magazine at all
 void GAPI_EXPORTS bindInArg (Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handleRMat = HandleRMat::BIND);
 
-// Extracts a memory object reference fro GRunArgP, stores it in appropriate slot in a magazine
+// Extracts a memory object reference from GRunArgP, stores it in appropriate slot in a magazine
 // Note on RMat handling from bindInArg above is also applied here
 void GAPI_EXPORTS bindOutArg(Mag& mag, const RcDesc &rc, const GRunArgP &arg, HandleRMat handleRMat = HandleRMat::BIND);
 
@@ -164,7 +164,7 @@ GAPI_EXPORTS void serialize( IOStream& os
 GAPI_EXPORTS GSerialized deserialize(IIStream& is);
 GAPI_EXPORTS void reconstruct(const GSerialized &s, ade::Graph &g);
 
-// FIXME: Basic Stream implementaions //////////////////////////////////////////
+// FIXME: Basic Stream implementations /////////////////////////////////////////
 
 // Basic in-memory stream implementations.
 class GAPI_EXPORTS ByteMemoryOutStream final: public IOStream {
@@ -460,7 +460,7 @@ public:
 const IEUnit &uu;
 cv::gimpl::GIslandExecutable::IOutput &out;
 
-// NB: Need to gurantee that MediaFrame::View doesn't die until request is over.
+// NB: Need to guarantee that MediaFrame::View doesn't die until request is over.
 using Views = std::vector<std::unique_ptr<cv::MediaFrame::View>>;
 Views views;
 
@@ -963,7 +963,7 @@ cv::gimpl::ie::GIEExecutable::GIEExecutable(const ade::Graph &g,
 
 void cv::gimpl::ie::GIEExecutable::run(cv::gimpl::GIslandExecutable::IInput &in,
 cv::gimpl::GIslandExecutable::IOutput &out) {
-// General alghoritm:
+// General algorithm:
 // 1. Collect island inputs/outputs.
 // 2. Create kernel context. (Every kernel has his own context).
 // 3. If the EndOfStream message is recieved, wait until all passed task are done.
@@ -766,7 +766,7 @@ cv::gimpl::GOAKExecutable::GOAKExecutable(const ade::Graph& g,
 // 1. Link input nodes to camera
 for (const auto& nh : in_nodes) {
 GAPI_Assert(m_oak_nodes.at(nh).inputs.size() == 1);
-// FIXME: cover other camera outputs
+// FIXME: convert other camera outputs
 // Link preview to infer, video to all other nodes
 if (m_oak_infer_info.find(nh) == m_oak_infer_info.end()) {
 m_camera_input->video.link(*(m_oak_nodes.at(nh).inputs[0]));
@@ -1102,7 +1102,7 @@ struct InferList2: public cv::detail::KernelTag {
 } else {
 GAPI_Assert(false && "Only Rect and Mat types are supported for infer list 2!");
 }
-// }}} (Preapre input)
+// }}} (Prepare input)
 } // }}} (For every input of the net)
 
 std::vector<cv::Mat> out_mats(uu.oc->numOutputs());
@@ -80,7 +80,7 @@ cv::Size cv::gapi::wip::draw::FTTextRender::Priv::getTextSize(const std::wstring
 // See (1) on picture.
 //
 // 4) As we can see the last pen position is isn't horizontal size yet.
-// We need to check if the glyph goes beyound the last position of the pen
+// We need to check if the glyph goes beyond the last position of the pen
 // To do this we can:
 // a) Return to the previous position -advance
 // b) Shift on left value +left
@@ -346,9 +346,9 @@ std::string GIslandModel::traceIslandName(const ade::NodeHandle& island_nh, cons
 auto& backend_impl = island_ptr->backend().priv();
 std::string backend_impl_type_name = typeid(backend_impl).name();
 
-// NOTE: Major part of already existing backends implementaion classes are called using
+// NOTE: Major part of already existing backends implementation classes are called using
 // "*G[Name]BackendImpl*" pattern.
-// We are trying to match against this pattern and retrive just [Name] part.
+// We are trying to match against this pattern and retrieve just [Name] part.
 // If matching isn't successful, full mangled class name will be used.
 //
 // To match we use following algorithm:
@@ -254,7 +254,7 @@ void apply(cv::gimpl::GModel::Graph &g) {
 }
 
 // Probably the simplest case: desync makes no sense in the regular
-// compilation process, so just drop all its occurences in the graph,
+// compilation process, so just drop all its occurrences in the graph,
 // reconnecting nodes properly.
 void drop(cv::gimpl::GModel::Graph &g) {
 // FIXME: LOG here that we're dropping the desync operations as
@@ -212,7 +212,7 @@ void cv::gimpl::passes::resolveKernels(ade::passes::PassContext &ctx,
 GAPI_Assert(op.k.outMeta == nullptr);
 const_cast<cv::GKernel::M&>(op.k.outMeta) = selected_impl.outMeta;
 } else {
-// Sanity check: the metadata funciton must be present
+// Sanity check: the metadata function must be present
 GAPI_Assert(op.k.outMeta != nullptr);
 }
 }
@@ -153,7 +153,7 @@ void writeBackExec(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg)
 {
 case GRunArgP::index_of<cv::Mat*>() : {
 // If there is a copy intrinsic at the end of the graph
-// we need to actualy copy the data to the user buffer
+// we need to actually copy the data to the user buffer
 // since output runarg was optimized to simply point
 // to the input of the copy kernel
 // FIXME:
@@ -1520,7 +1520,7 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr<ade::Graph> &&
 
 cv::gimpl::GStreamingExecutor::~GStreamingExecutor()
 {
-// FIXME: this is a temporary try-catch exception hadling.
+// FIXME: this is a temporary try-catch exception handling.
 // Need to eliminate throwings from stop()
 try {
 if (state == State::READY || state == State::RUNNING)
@@ -1619,7 +1619,7 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins)
 case T::index_of<cv::gapi::wip::IStreamSource::Ptr>():
 #if !defined(GAPI_STANDALONE)
 emitter.reset(new VideoEmitter{emit_arg});
-// Currently all video inputs are syncronized if sync policy is to drop,
+// Currently all video inputs are synchronized if sync policy is to drop,
 // there is no different fps branches etc, so all video emitters are registered
 video_emitters.emplace_back(emit_nh);
 #else
@@ -353,7 +353,7 @@ namespace graph {
 }
 
 ctx.executed++;
-// reset dependecy_count to initial state to simplify re-execution of the same graph
+// reset dependency_count to initial state to simplify re-execution of the same graph
 node->dependency_count = node->dependencies;
 
 return result;
@@ -111,11 +111,11 @@ GstElement* GStreamerPipelineFacade::getElementByName(const std::string& element
 void GStreamerPipelineFacade::completePreroll() {
 // FIXME: If there are multiple sources in pipeline and one of them is live, then pipeline
 // will return GST_STATE_CHANGE_NO_PREROLL while pipeline pausing.
-// But appsink may not be connected to this live source and only to anothers,
+// But appsink may not be connected to this live source and only to another,
 // not-live ones. So, it is not required to start the playback for appsink to complete
 // the preroll.
 // Starting of playback for the not-live sources before the first frame pull will lead
-// to loosing of some amount of frames and pulling of the first frame can return frame
+// to losing of some amount of frames and pulling of the first frame can return frame
 // which is far from the first.
 //
 // Need to handle this case or forbid to mix multiples sources of different
@@ -54,7 +54,7 @@ struct VPLAccelerationPolicy
 virtual void init(session_t session) = 0;
 virtual void deinit(session_t session) = 0;
 
-// Limitation: cannot give guarantee in succesful memory realloccation
+// Limitation: cannot give guarantee in successful memory realloccation
 // for existing workspace in existing pool (see realloc)
 // thus it is not implemented,
 // PLEASE provide initial memory area large enough
@@ -59,7 +59,7 @@ public:
 /**
 * Extract value thread-safe lock counter (see @ref Surface description).
 * It's usual situation that counter may be instantly decreased in other thread after this method called.
-* We need instantaneous value. This method syncronized in inter-threading way with @ref Surface::release_lock()
+* We need instantaneous value. This method synchronized in inter-threading way with @ref Surface::release_lock()
 *
 * @return fetched locks count.
 */
@@ -163,7 +163,7 @@ private:
 * deinitialization called off in `on_unlock`
 * because new `incoming` request had appeared at here before
 * `on_unlock` started deinit procedure in another thread.
-* So no reinit required because no deinit had happended
+* So no reinit required because no deinit had happened
 *
 * main `busy-wait` request must break busy-wait state
 * and become `outgoing` request.
@@ -44,7 +44,7 @@ void VPLLegacyDecodeEngine::try_modify_pool_size_request_param(const char* param
 param_name + ", overflow");
 }
 request.NumFrameSuggested = static_cast<mfxU16>(new_frames_count);
-GAPI_LOG_DEBUG(nullptr, "mfxFrameAllocRequest overriden by user input: " <<
+GAPI_LOG_DEBUG(nullptr, "mfxFrameAllocRequest overridden by user input: " <<
 ", mfxFrameAllocRequest.NumFrameMin: " << request.NumFrameMin <<
 ", mfxFrameAllocRequest.NumFrameSuggested: " << request.NumFrameSuggested <<
 ", mfxFrameAllocRequest.Type: " << request.Type);
@@ -152,7 +152,7 @@ VPLLegacyDecodeEngine::VPLLegacyDecodeEngine(std::unique_ptr<VPLAccelerationPoli
 } while (MFX_ERR_NONE == sess.last_status && !my_sess.sync_queue.empty());
 return ExecutionStatus::Continue;
 },
-// 4) Falls back on generic status procesing
+// 4) Falls back on generic status processing
 [this] (EngineSession& sess) -> ExecutionStatus
 {
 return this->process_error(sess.last_status, static_cast<LegacyDecodeSession&>(sess));
@@ -177,7 +177,7 @@ VPLLegacyDecodeEngine::SessionParam VPLLegacyDecodeEngine::prepare_session_param
 mfxVideoParam mfxDecParams {};
 mfxDecParams.mfx.CodecId = decoder_id_name;
 
-// set memory stream direction accroding to accelearion policy device type
+// set memory stream direction according to acceleration policy device type
 IDeviceSelector::DeviceScoreTable devices = acceleration_policy->get_device_selector()->select_devices();
 GAPI_Assert(devices.size() == 1 && "Multiple(or zero) acceleration devices case is unsupported");
 AccelType accel_type = devices.begin()->second.get_type();
@@ -252,7 +252,7 @@ VPLLegacyDecodeEngine::SessionParam VPLLegacyDecodeEngine::prepare_session_param
 acceleration_policy->create_surface_pool(decRequest, mfxDecParams.mfx.FrameInfo);
 
 // Input parameters finished, now initialize decode
-// create decoder for session accoring to header recovered from source file
+// create decoder for session according to header recovered from source file
 
 sts = MFXVideoDECODE_Init(mfx_session, &mfxDecParams);
 if (MFX_ERR_NONE != sts) {
@@ -147,7 +147,7 @@ VPPPreprocEngine::VPPPreprocEngine(std::unique_ptr<VPLAccelerationPolicy>&& acce
 } while (MFX_ERR_NONE == sess.last_status && !my_sess.vpp_out_queue.empty());
 return ExecutionStatus::Continue;
 },
-// 2) Falls back on generic status procesing
+// 2) Falls back on generic status processing
 [this] (EngineSession& sess) -> ExecutionStatus
 {
 return this->process_error(sess.last_status, static_cast<session_type&>(sess));
@@ -273,7 +273,7 @@ pp_session VPPPreprocEngine::initialize_preproc(const pp_params& initial_frame_p
 throw std::runtime_error("Cannot execute MFXVideoVPP_QueryIOSurf");
 }
 
-// NB: Assing ID as upper limit descendant to distinguish specific VPP allocation
+// NB: Assign ID as upper limit descendant to distinguish specific VPP allocation
 // from decode allocations witch started from 0: by local module convention
 
 static uint16_t request_id = 0;
@@ -248,7 +248,7 @@ VPLLegacyTranscodeEngine::VPLLegacyTranscodeEngine(std::unique_ptr<VPLAccelerati
 } while (MFX_ERR_NONE == sess.last_status && !my_sess.vpp_queue.empty());
 return ExecutionStatus::Continue;
 },
-// 5) Falls back on generic status procesing
+// 5) Falls back on generic status processing
 [this] (EngineSession& sess) -> ExecutionStatus
 {
 return this->process_error(sess.last_status, static_cast<LegacyDecodeSession&>(sess));
@@ -358,7 +358,7 @@ VPLLegacyTranscodeEngine::initialize_session(mfxSession mfx_session,
 
 }
 
-// NB: Assing ID as upper limit descendant to distinguish specific VPP allocation
+// NB: Assign ID as upper limit descendant to distinguish specific VPP allocation
 // from decode allocations witch started from 0: by local module convention
 vppRequests[1].AllocId = std::numeric_limits<uint16_t>::max();
 
@@ -129,7 +129,7 @@ GSource::Priv::Priv(std::shared_ptr<IDataProvider> provider,
 }
 
 // collect optional-preferred input parameters from input params
-// which may (optionally) or may not be used to choose the most preferrable
+// which may (optionally) or may not be used to choose the most preferable
 // VPL implementation (for example, specific API version or Debug/Release VPL build)
 std::vector<CfgParam> preferred_params;
 std::copy_if(cfg_params.begin(), cfg_params.end(), std::back_inserter(preferred_params),
@@ -137,7 +137,7 @@ GSource::Priv::Priv(std::shared_ptr<IDataProvider> provider,
 std::sort(preferred_params.begin(), preferred_params.end());
 
 GAPI_LOG_DEBUG(nullptr, "Find MFX better implementation from handle: " << mfx_handle <<
-" is satisfying preferrable params count: " << preferred_params.size());
+" is satisfying preferable params count: " << preferred_params.size());
 int i = 0;
 mfxImplDescription *idesc = nullptr;
 std::vector<mfxImplDescription*> available_impl_descriptions;
@@ -162,7 +162,7 @@ GSource::Priv::Priv(std::shared_ptr<IDataProvider> provider,
 GAPI_LOG_INFO(nullptr, "Implementation index: " << i << "\n" << ss.str());
 
 // Only one VPL implementation is required for GSource here.
-// Let's find intersection params from available impl with preferrable input params
+// Let's find intersection params from available impl with preferable input params
 // to find best match.
 // An available VPL implementation with max matching count
 std::vector<CfgParam> impl_params = get_params_from_string<CfgParam>(ss.str());
@@ -178,7 +178,7 @@ GSource::Priv::Priv(std::shared_ptr<IDataProvider> provider,
 // in case of no input preferrance we consider all params are matched
 // for the first available VPL implementation. It will be a chosen one
 matches_count.emplace(impl_params.size(), i++);
-GAPI_LOG_DEBUG(nullptr, "No preferrable params, use the first one implementation");
+GAPI_LOG_DEBUG(nullptr, "No preferable params, use the first one implementation");
 break;
 } else {
 GAPI_LOG_DEBUG(nullptr, "Equal param intersection count: " << matched_params.size());
@@ -401,7 +401,7 @@ std::string ext_mem_frame_type_to_cstr(int type) {
 std::stringstream ss;
 APPEND_STRINGIFY_MASK_N_ERASE(type, "|", MFX_MEMTYPE_DXVA2_DECODER_TARGET);
 APPEND_STRINGIFY_MASK_N_ERASE(type, "|", MFX_MEMTYPE_DXVA2_PROCESSOR_TARGET);
-// NB: accoring to VPL source the commented MFX_* constane below are belong to the
+// NB: according to VPL source the commented MFX_* constane below are belong to the
 // same actual integral value as condition abobe. So it is impossible
 // to distinct them in condition branch. Just put this comment and possible
 // constans here...
@@ -13,7 +13,7 @@
 
 #include <opencv2/gapi/util/compiler_hints.hpp>
 
-// NOTE: OPENCV_WITH_ITT is only defined if ITT dependecy is built by OpenCV infrastructure.
+// NOTE: OPENCV_WITH_ITT is only defined if ITT dependency is built by OpenCV infrastructure.
 // There will not be such define in G-API standalone mode.
 // TODO: Consider using OpenCV's trace.hpp
 #if defined(OPENCV_WITH_ITT)
@@ -415,7 +415,7 @@ TEST(StatefulKernel, StateIsInitViaCompArgs)
 // Allowing 1% difference of all pixels between G-API and OpenCV results
 compareBackSubResults(gapiForeground, ocvForeground, 1);
 
-// Additionally, test the case where state is resetted
+// Additionally, test the case where state is reset
 gapiBackSub.prepareForNewStream();
 gapiBackSub(cv::gin(frame), cv::gout(gapiForeground));
 pOcvBackSub->apply(frame, ocvForeground);
@@ -67,7 +67,7 @@ namespace
 }
 };
 
-// These definitons test the correct macro work if the kernel has multiple output values
+// These definitions test the correct macro work if the kernel has multiple output values
 G_TYPED_KERNEL(GRetGArrayTupleOfGMat2Kernel, <GArray<std::tuple<GMat, GMat>>(GMat, Scalar)>, "org.opencv.test.retarrayoftupleofgmat2kernel") {};
 G_TYPED_KERNEL(GRetGArraTupleyOfGMat3Kernel, <GArray<std::tuple<GMat, GMat, GMat>>(GMat)>, "org.opencv.test.retarrayoftupleofgmat3kernel") {};
 G_TYPED_KERNEL(GRetGArraTupleyOfGMat4Kernel, <GArray<std::tuple<GMat, GMat, GMat, GMat>>(GMat)>, "org.opencv.test.retarrayoftupleofgmat4kernel") {};
@@ -346,9 +346,9 @@ void preproc_function(cv::gapi::wip::IPreprocEngine &preproc_engine, SafeQueue&q
 // launch pipeline
 bool in_progress = false;
 // let's allow counting of preprocessed frames to check this value later:
-// Currently, it looks redundant to implement any kind of gracefull shutdown logic
+// Currently, it looks redundant to implement any kind of graceful shutdown logic
 // in this test - so let's apply agreement that media source is processed
-// succesfully when preproc_number != 1 in result.
+// successfully when preproc_number != 1 in result.
 // Specific validation logic which adhere to explicit counter value may be implemented
 // in particular test scope
 preproc_number = 1;