Merge pull request #19752 from mpashchenkov:mp/onnx-int64-to-32
G-API: ONNX. Adding INT64-to-INT32 conversion for output.

* Added int64-to-int32 conversion
* Added warning
* Added type checks for all toCV
* Added type checks for tests
* Small fixes
* Const for fixture in test
* std::tuple as return value for toCV
* Mistake
* Changed toCV for tests
* Added Assert
* Fix for comments
* One conversion for ONNX and IE
* Clean up
* One more fix
* Added copyFromONNX
* Removed warning
* Apply review comments
This commit is contained in:
parent 3f14cb073b · commit 69fc0acd1a
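In short, ONNX models that report INT64 outputs (Faster R-CNN labels, for instance) are now copied into CV_32S cv::Mat buffers through an element-wise narrowing helper. The snippet below is an illustrative, standalone restatement of that idea using only the C++ standard library; the helper name mirrors the convertInt64ToInt32 added by the patch, but the snippet itself is not part of it.

```cpp
// Illustrative sketch only: element-wise INT64 -> INT32 narrowing, as used by the patch.
// Values that do not fit into int32 are silently narrowed by static_cast; the patch
// itself only logs a warning about the precision change at the call sites.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

static void convert_int64_to_int32(const int64_t* src, int* dst, std::size_t size) {
    std::transform(src, src + size, dst,
                   [](int64_t el) { return static_cast<int>(el); });
}

int main() {
    const std::vector<int64_t> labels = {0, 7, 42};  // e.g. detection class ids
    std::vector<int> narrowed(labels.size());
    convert_int64_to_int32(labels.data(), narrowed.data(), labels.size());
    for (int v : narrowed) std::cout << v << ' ';    // prints: 0 7 42
    std::cout << '\n';
    return 0;
}
```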
@@ -209,6 +209,12 @@ inline cv::util::optional<T> getCompileArg(const cv::GCompileArgs &args)
 void GAPI_EXPORTS createMat(const cv::GMatDesc& desc, cv::Mat& mat);
 
+inline void convertInt64ToInt32(const int64_t* src, int* dst, size_t size)
+{
+    std::transform(src, src + size, dst,
+                   [](int64_t el) { return static_cast<int>(el); });
+}
+
 }} // cv::gimpl
 
 #endif // OPENCV_GAPI_GBACKEND_HPP

@@ -106,7 +106,7 @@ inline IE::Precision toIE(int depth) {
     case CV_8U:  return IE::Precision::U8;
     case CV_32S: return IE::Precision::I32;
     case CV_32F: return IE::Precision::FP32;
-    default:     GAPI_Assert(false && "Unsupported data type");
+    default:     GAPI_Assert(false && "IE. Unsupported data type");
     }
     return IE::Precision::UNSPECIFIED;
 }
@@ -115,7 +115,8 @@ inline int toCV(IE::Precision prec) {
     case IE::Precision::U8:   return CV_8U;
    case IE::Precision::FP32: return CV_32F;
     case IE::Precision::I32:  return CV_32S;
-    default:     GAPI_Assert(false && "Unsupported data type");
+    case IE::Precision::I64:  return CV_32S;
+    default:     GAPI_Assert(false && "IE. Unsupported data type");
     }
     return -1;
 }
@@ -158,7 +159,7 @@ inline IE::Blob::Ptr wrapIE(const cv::Mat &mat, cv::gapi::ie::TraitAs hint) {
         HANDLE(32F, float);
         HANDLE(32S, int);
 #undef HANDLE
-    default: GAPI_Assert(false && "Unsupported data type");
+    default: GAPI_Assert(false && "IE. Unsupported data type");
     }
     return IE::Blob::Ptr{};
 }
@@ -194,7 +195,14 @@ inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
         HANDLE(FP32, float);
         HANDLE(I32, int);
 #undef HANDLE
-    default: GAPI_Assert(false && "Unsupported data type");
+    case IE::Precision::I64: {
+        GAPI_LOG_WARNING(NULL, "INT64 isn't supported for cv::Mat. Conversion to INT32 is used.");
+        cv::gimpl::convertInt64ToInt32(blob->buffer().as<int64_t*>(),
+                                       reinterpret_cast<int*>(mat.data),
+                                       mat.total());
+        break;
+    }
+    default: GAPI_Assert(false && "IE. Unsupported data type");
     }
 }

@@ -170,7 +170,8 @@ inline int toCV(ONNXTensorElementDataType prec) {
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U;
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F;
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: return CV_32S;
-    default: GAPI_Assert(false && "Unsupported data type");
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: return CV_32S;
+    default: GAPI_Assert(false && "ONNX. Unsupported data type");
     }
     return -1;
 }
@@ -184,11 +185,30 @@ inline std::vector<int> toCV(const std::vector<int64_t> &vsz) {
     return result;
 }
 
-inline cv::Mat toCV(Ort::Value &v) {
-    auto info = v.GetTensorTypeAndShapeInfo();
-    return cv::Mat(toCV(info.GetShape()),
-                   toCV(info.GetElementType()),
-                   reinterpret_cast<void*>(v.GetTensorMutableData<uint8_t*>()));
-}
+inline void copyFromONNX(Ort::Value &v, cv::Mat& mat) {
+    const auto info = v.GetTensorTypeAndShapeInfo();
+    const auto prec = info.GetElementType();
+    const auto shape = toCV(info.GetShape());
+    mat.create(shape, toCV(prec));
+    switch (prec) {
+#define HANDLE(E,T)                                           \
+        case E: std::copy_n(v.GetTensorMutableData<T>(),      \
+                            mat.total(),                      \
+                            reinterpret_cast<T*>(mat.data));  \
+            break;
+        HANDLE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, uint8_t);
+        HANDLE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, float);
+        HANDLE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32, int);
+#undef HANDLE
+        case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: {
+            GAPI_LOG_WARNING(NULL, "INT64 isn't supported for cv::Mat. Conversion to INT32 is used.");
+            cv::gimpl::convertInt64ToInt32(v.GetTensorMutableData<int64_t>(),
+                                           reinterpret_cast<int*>(mat.data),
+                                           mat.total());
+            break;
+        }
+        default: GAPI_Assert(false && "ONNX. Unsupported data type");
+    }
+}
 
 inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
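copyFromONNX above dispatches on the tensor element type: known types are bulk-copied via the HANDLE macro, INT64 takes the narrowing path, and anything else asserts. The sketch below reproduces that dispatch with a hypothetical plain-C++ tensor stand-in (FakeTensor and copy_to_i32 are made-up names for illustration; no ONNX Runtime or OpenCV types are involved).

```cpp
// Hypothetical stand-in for an inference output tensor: a type tag plus raw bytes.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

enum class ElemType { U8, F32, I32, I64 };

struct FakeTensor {
    ElemType type;
    std::vector<uint8_t> bytes;   // raw payload
    std::size_t count;            // number of elements
};

// Copy the payload into an int32 destination, narrowing INT64 on the fly --
// the same copy-or-convert dispatch copyFromONNX performs for its INT64 branch.
static void copy_to_i32(const FakeTensor& t, std::vector<int>& dst) {
    dst.resize(t.count);
    switch (t.type) {
    case ElemType::I32:
        std::memcpy(dst.data(), t.bytes.data(), t.count * sizeof(int));
        break;
    case ElemType::I64: {
        const auto* src = reinterpret_cast<const int64_t*>(t.bytes.data());
        std::transform(src, src + t.count, dst.begin(),
                       [](int64_t el) { return static_cast<int>(el); });
        break;
    }
    default:
        assert(false && "only I32/I64 shown in this sketch");
        break;
    }
}

int main() {
    FakeTensor t;
    t.type  = ElemType::I64;
    t.count = 3;
    const int64_t vals[3] = {1, 2, 300};
    t.bytes.resize(sizeof(vals));
    std::memcpy(t.bytes.data(), vals, sizeof(vals));

    std::vector<int> out;
    copy_to_i32(t, out);                       // out == {1, 2, 300}
    assert(out.size() == 3 && out[2] == 300);
    return 0;
}
```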
@@ -199,12 +219,13 @@ inline void preprocess(const cv::Mat& src,
                        const cv::gimpl::onnx::TensorInfo& ti,
                        cv::Mat& dst) {
     GAPI_Assert(src.depth() == CV_32F || src.depth() == CV_8U);
 
+    // CNN input type
+    const auto type = toCV(ti.type);
     if (src.depth() == CV_32F) {
         // Just pass the tensor as-is.
         // No layout or dimension transformations done here!
         // TODO: This needs to be aligned across all NN backends.
-        GAPI_Assert(toCV(ti.type) == CV_32F && "Only 32F model input is supported for 32F data");
+        GAPI_Assert(type == CV_32F && "Only 32F model input is supported for 32F input data");
         const auto tensor_dims = toORT(src.size);
         if (tensor_dims.size() == ti.dims.size()) {
             for (size_t i = 0; i < ti.dims.size(); ++i) {
@@ -225,9 +246,8 @@ inline void preprocess(const cv::Mat& src,
     const bool with_batch = ti.dims.size() == 4u ? true : false;
     const int shift = with_batch ? 0 : 1;
 
-    const auto ddepth = toCV(ti.type);
-    GAPI_Assert((ddepth == CV_8U || ddepth == CV_32F)
-                && "Only 8U and 32F model input is supported for 8U data");
+    GAPI_Assert((type == CV_8U || type == CV_32F)
+                && "Only 8U and 32F model input is supported for 8U input data");
 
     // Assess the expected input layout
     const bool is_hwc = [&](int ch) {
@@ -261,8 +281,8 @@ inline void preprocess(const cv::Mat& src,
 
     cv::Mat rsz, pp;
     cv::resize(csc, rsz, cv::Size(new_w, new_h));
-    if (src.depth() == CV_8U && ddepth == CV_32F) {
-        rsz.convertTo(pp, ddepth, ti.normalize ? 1.f / 255 : 1.f);
+    if (src.depth() == CV_8U && type == CV_32F) {
+        rsz.convertTo(pp, type, ti.normalize ? 1.f / 255 : 1.f);
         if (ti.mstd.has_value()) {
             pp -= ti.mstd->mean;
             pp /= ti.mstd->stdev;
@@ -273,7 +293,7 @@ inline void preprocess(const cv::Mat& src,
 
     if (!is_hwc && new_c > 1) {
         // Convert to CHW
-        dst.create(cv::Size(new_w, new_h * new_c), ddepth);
+        dst.create(cv::Size(new_w, new_h * new_c), type);
         std::vector<cv::Mat> planes(new_c);
         for (int ch = 0; ch < new_c; ++ch) {
             planes[ch] = dst.rowRange(ch * new_h, (ch + 1) * new_h);
@@ -347,7 +367,7 @@ inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info,
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
         return createTensor<int32_t>(memory_info, tensor_params, data);
     default:
-        GAPI_Assert(false && "Unsupported data type");
+        GAPI_Assert(false && "ONNX. Unsupported data type");
     }
     return Ort::Value{nullptr};
 }
@@ -796,7 +816,7 @@ void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
                            ade::util::toRange(outputs))) {
         const auto &out_name = std::get<0>(iter).name;
               auto &out_tensor = std::get<1>(iter);
-        onnx_outputs[out_name] = toCV(out_tensor);
+        copyFromONNX(out_tensor, onnx_outputs[out_name]);
     }
 
     // Fill in G-API outputs

@@ -122,16 +122,44 @@ inline void toCHW(const cv::Mat& src, cv::Mat& dst) {
     cv::split(src, planes);
 }
 
-inline int toCV(const ONNXTensorElementDataType prec) {
+inline int toCV(ONNXTensorElementDataType prec) {
     switch (prec) {
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U;
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F;
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: return CV_32S;
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: return CV_32S;
     default: GAPI_Assert(false && "Unsupported data type");
     }
     return -1;
 }
 
+void copyFromONNX(Ort::Value &v, cv::Mat& mat) {
+    const auto info = v.GetTensorTypeAndShapeInfo();
+    const auto prec = info.GetElementType();
+    const auto shape = info.GetShape();
+    const std::vector<int> dims(shape.begin(), shape.end());
+    mat.create(dims, toCV(prec));
+    switch (prec) {
+#define HANDLE(E,T)                                           \
+        case E: std::copy_n(v.GetTensorMutableData<T>(),      \
+                            mat.total(),                      \
+                            reinterpret_cast<T*>(mat.data));  \
+            break;
+        HANDLE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, uint8_t);
+        HANDLE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, float);
+        HANDLE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32, int);
+#undef HANDLE
+        case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: {
+            const auto o_ptr = v.GetTensorMutableData<int64_t>();
+            const auto g_ptr = reinterpret_cast<int*>(mat.data);
+            std::transform(o_ptr, o_ptr + mat.total(), g_ptr,
+                           [](int64_t el) { return static_cast<int>(el); });
+            break;
+        }
+        default: GAPI_Assert(false && "ONNX. Unsupported data type");
+    }
+}
+
 inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
     return cv::to_own<int64_t>(sz);
 }
@@ -237,6 +265,26 @@ void remapSSDPorts(const std::unordered_map<std::string, cv::Mat> &onnx,
     remapToIESSDOut({num_detections, detection_boxes, detection_scores, detection_classes}, ssd_output);
 }
 
+void remapRCNNPorts(const std::unordered_map<std::string, cv::Mat> &onnx,
+                    std::unordered_map<std::string, cv::Mat> &gapi) {
+    // Simple copy for outputs
+    const cv::Mat& in_boxes = onnx.at("6379");
+    const cv::Mat& in_labels = onnx.at("6381");
+    const cv::Mat& in_scores = onnx.at("6383");
+
+    GAPI_Assert(in_boxes.depth() == CV_32F);
+    GAPI_Assert(in_labels.depth() == CV_32S);
+    GAPI_Assert(in_scores.depth() == CV_32F);
+
+    cv::Mat& out_boxes = gapi.at("out1");
+    cv::Mat& out_labels = gapi.at("out2");
+    cv::Mat& out_scores = gapi.at("out3");
+
+    copyToOut<float>(in_boxes, out_boxes);
+    copyToOut<int>(in_labels, out_labels);
+    copyToOut<float>(in_scores, out_scores);
+}
+
 class ONNXtest : public ::testing::Test {
 public:
     std::string model_path;
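remapRCNNPorts above only renames outputs: the FasterRCNN model publishes its tensors under opaque node names ("6379", "6381", "6383"), and the test copies them, via the copyToOut helper defined elsewhere in the test file, into the names declared through cfgOutputLayers ("out1".."out3") so that validate() compares like with like. A minimal name-remapping sketch with standard containers only (the types and names here are hypothetical, not the test's actual helpers):

```cpp
#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

using Blob = std::vector<float>;  // stand-in for cv::Mat in this sketch

// Copy tensors published under model-internal names into the user-facing names.
static void remap_outputs(const std::map<std::string, Blob>& raw,
                          std::map<std::string, Blob>& user) {
    static const std::pair<const char*, const char*> names[] = {
        {"6379", "out1"}, {"6381", "out2"}, {"6383", "out3"}};
    for (const auto& n : names) {
        user[n.second] = raw.at(n.first);  // throws if the model output is missing
    }
}

int main() {
    std::map<std::string, Blob> raw{{"6379", {0.f}}, {"6381", {1.f}}, {"6383", {0.9f}}};
    std::map<std::string, Blob> user;
    remap_outputs(raw, user);
    assert(user.count("out2") == 1);
    return 0;
}
```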
@@ -250,7 +298,6 @@ public:
         env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test");
         memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
         out_gapi.resize(1);
-        out_onnx.resize(1);
         // FIXME: It should be an image from own (gapi) directory in opencv extra
         in_mat1 = cv::imread(findDataFile("cv/dpm/cat.png"));
     }
@@ -301,14 +348,13 @@ public:
                                   num_out);
         // Copy outputs
         GAPI_Assert(result.size() == num_out);
-        outs.resize(num_out);
         for (size_t i = 0; i < num_out; ++i) {
             const auto info = result[i].GetTensorTypeAndShapeInfo();
             const auto shape = info.GetShape();
-            const auto type = info.GetElementType();
-            cv::Mat mt(std::vector<int>(shape.begin(), shape.end()), toCV(type),
-                       reinterpret_cast<void*>(result[i].GetTensorMutableData<uint8_t*>()));
-            mt.copyTo(outs[i]);
+            const auto type = toCV(info.GetElementType());
+            const std::vector<int> dims(shape.begin(), shape.end());
+            outs.emplace_back(dims, type);
+            copyFromONNX(result[i], outs.back());
         }
     }
     // One input/output overload
@@ -357,7 +403,7 @@ public:
     // Rois for InferList, InferList2
     const std::vector<cv::Rect> rois = {
         cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}),
-        cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
+        cv::Rect(cv::Point{50, 100}, cv::Size{250, 360})
     };
 
     void preprocess(const cv::Mat& src, cv::Mat& dst) {
@@ -426,6 +472,37 @@ public:
     }
 };
 
+class ONNXRCNN : public ONNXWithRemap {
+private:
+    const cv::Scalar rcnn_mean = { 102.9801, 115.9465, 122.7717 };
+    const float range_max = 1333;
+    const float range_min = 800;
+public:
+    void preprocess(const cv::Mat& src, cv::Mat& dst) {
+        cv::Mat rsz, cvt, chw, mn;
+        const auto get_ratio = [&](const int dim) -> float {
+            return ((dim > range_max) || (dim < range_min))
+                       ? dim > range_max
+                             ? range_max / dim
+                             : range_min / dim
+                       : 1.f;
+        };
+        const auto ratio_h = get_ratio(src.rows);
+        const auto ratio_w = get_ratio(src.cols);
+        const auto new_h = static_cast<int>(ratio_h * src.rows);
+        const auto new_w = static_cast<int>(ratio_w * src.cols);
+        cv::resize(src, rsz, cv::Size(new_w, new_h));
+        rsz.convertTo(cvt, CV_32F, 1.f);
+        toCHW(cvt, chw);
+        mn = chw - rcnn_mean;
+        const int padded_h = std::ceil(new_h / 32.f) * 32;
+        const int padded_w = std::ceil(new_w / 32.f) * 32;
+        cv::Mat pad_im(cv::Size(padded_w, 3 * padded_h), CV_32F, 0.f);
+        pad_im(cv::Rect(0, 0, mn.cols, mn.rows)) += mn;
+        dst = pad_im.reshape(1, {3, padded_h, padded_w});
+    }
+};
+
 class ONNXYoloV3MultiInput : public ONNXWithRemap {
 public:
     std::vector<cv::Mat> ins;
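The ONNXRCNN fixture's preprocessing scales each image side so it lands in the [range_min, range_max] = [800, 1333] window (each dimension gets its own ratio), then pads to the next multiple of 32. As a worked example of that arithmetic (standalone, with a hypothetical 1000x400 input; the constants are the ones from the fixture):

```cpp
#include <cmath>
#include <iostream>

int main() {
    const float range_min = 800.f, range_max = 1333.f;
    const int src_cols = 1000, src_rows = 400;   // hypothetical input, width x height

    // Same per-dimension ratio rule as ONNXRCNN::preprocess.
    auto get_ratio = [&](int dim) -> float {
        if (dim > range_max) return range_max / dim;
        if (dim < range_min) return range_min / dim;
        return 1.f;
    };

    const int new_h = static_cast<int>(get_ratio(src_rows) * src_rows);  // 400 * 2.0  = 800
    const int new_w = static_cast<int>(get_ratio(src_cols) * src_cols);  // 1000 * 1.0 = 1000
    const int pad_h = static_cast<int>(std::ceil(new_h / 32.f)) * 32;    // 800  -> 800
    const int pad_w = static_cast<int>(std::ceil(new_w / 32.f)) * 32;    // 1000 -> 1024

    std::cout << "resized " << new_w << "x" << new_h
              << ", padded " << pad_w << "x" << pad_h << "\n";
    return 0;
}
```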
@@ -459,7 +536,7 @@ TEST_F(ONNXClassificationTest, Infer)
     // ONNX_API code
     cv::Mat processed_mat;
     preprocess(in_mat1, processed_mat);
-    infer<float>(processed_mat, out_onnx.front());
+    infer<float>(processed_mat, out_onnx);
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
@@ -482,7 +559,7 @@ TEST_F(ONNXClassificationTest, InferTensor)
     cv::Mat tensor;
     preprocess(in_mat1, tensor);
     // ONNX_API code
-    infer<float>(tensor, out_onnx.front());
+    infer<float>(tensor, out_onnx);
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
@@ -499,11 +576,11 @@ TEST_F(ONNXClassificationTest, InferTensor)
 TEST_F(ONNXClassificationTest, InferROI)
 {
     useModel("classification/squeezenet/model/squeezenet1.0-9");
-    const auto ROI = rois.at(1);
+    const auto ROI = rois.at(0);
     // ONNX_API code
     cv::Mat roi_mat;
     preprocess(in_mat1(ROI), roi_mat);
-    infer<float>(roi_mat, out_onnx.front());
+    infer<float>(roi_mat, out_onnx);
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
@@ -524,11 +601,10 @@ TEST_F(ONNXClassificationTest, InferROIList)
 {
     useModel("classification/squeezenet/model/squeezenet1.0-9");
     // ONNX_API code
-    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
         cv::Mat roi_mat;
         preprocess(in_mat1(rois[i]), roi_mat);
-        infer<float>(roi_mat, out_onnx[i]);
+        infer<float>(roi_mat, out_onnx);
     }
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
@@ -550,11 +626,10 @@ TEST_F(ONNXClassificationTest, Infer2ROIList)
 {
     useModel("classification/squeezenet/model/squeezenet1.0-9");
     // ONNX_API code
-    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
         cv::Mat roi_mat;
         preprocess(in_mat1(rois[i]), roi_mat);
-        infer<float>(roi_mat, out_onnx[i]);
+        infer<float>(roi_mat, out_onnx);
     }
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
@@ -582,7 +657,7 @@ TEST_F(ONNXWithRemap, InferDynamicInputTensor)
     toCHW(cvt, tensor);
     tensor = tensor.reshape(1, {1, 3, 416, 416});
     // ONNX_API code
-    infer<float>(tensor, out_onnx.front());
+    infer<float>(tensor, out_onnx);
     // G_API code
     G_API_NET(YoloNet, <cv::GMat(cv::GMat)>, "YoloNet");
     cv::GMat in;
@@ -604,7 +679,7 @@ TEST_F(ONNXGRayScaleTest, InferImage)
     // ONNX_API code
     cv::Mat prep_mat;
     preprocess(in_mat1, prep_mat);
-    infer<float>(prep_mat, out_onnx.front());
+    infer<float>(prep_mat, out_onnx);
     // G_API code
     G_API_NET(EmotionNet, <cv::GMat(cv::GMat)>, "emotion-ferplus");
     cv::GMat in;
@@ -650,7 +725,7 @@ TEST_F(ONNXMediaFrameTest, InferBGR)
     // ONNX_API code
     cv::Mat processed_mat;
     preprocess(in_mat1, processed_mat);
-    infer<float>(processed_mat, out_onnx.front());
+    infer<float>(processed_mat, out_onnx);
     // G_API code
     auto frame = MediaFrame::Create<TestMediaBGR>(in_mat1);
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
@@ -676,7 +751,7 @@ TEST_F(ONNXMediaFrameTest, InferYUV)
     cvtColorTwoPlane(m_in_y, m_in_uv, pp, cv::COLOR_YUV2BGR_NV12);
     cv::Mat processed_mat;
     preprocess(pp, processed_mat);
-    infer<float>(processed_mat, out_onnx.front());
+    infer<float>(processed_mat, out_onnx);
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GFrame in;
@@ -699,7 +774,7 @@ TEST_F(ONNXMediaFrameTest, InferROIBGR)
     // ONNX_API code
     cv::Mat roi_mat;
     preprocess(in_mat1(rois.front()), roi_mat);
-    infer<float>(roi_mat, out_onnx.front());
+    infer<float>(roi_mat, out_onnx);
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GFrame in;
@@ -725,7 +800,7 @@ TEST_F(ONNXMediaFrameTest, InferROIYUV)
     cvtColorTwoPlane(m_in_y, m_in_uv, pp, cv::COLOR_YUV2BGR_NV12);
     cv::Mat roi_mat;
     preprocess(pp(rois.front()), roi_mat);
-    infer<float>(roi_mat, out_onnx.front());
+    infer<float>(roi_mat, out_onnx);
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GFrame in;
@@ -747,11 +822,10 @@ TEST_F(ONNXMediaFrameTest, InferListBGR)
     useModel("classification/squeezenet/model/squeezenet1.0-9");
     const auto frame = MediaFrame::Create<TestMediaBGR>(in_mat1);
     // ONNX_API code
-    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
         cv::Mat roi_mat;
         preprocess(in_mat1(rois[i]), roi_mat);
-        infer<float>(roi_mat, out_onnx[i]);
+        infer<float>(roi_mat, out_onnx);
     }
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
@@ -776,11 +850,10 @@ TEST_F(ONNXMediaFrameTest, InferListYUV)
     // ONNX_API code
     cv::Mat pp;
     cvtColorTwoPlane(m_in_y, m_in_uv, pp, cv::COLOR_YUV2BGR_NV12);
-    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
         cv::Mat roi_mat;
         preprocess(pp(rois[i]), roi_mat);
-        infer<float>(roi_mat, out_onnx[i]);
+        infer<float>(roi_mat, out_onnx);
     }
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
@@ -803,11 +876,10 @@ TEST_F(ONNXMediaFrameTest, InferList2BGR)
     useModel("classification/squeezenet/model/squeezenet1.0-9");
     const auto frame = MediaFrame::Create<TestMediaBGR>(in_mat1);
     // ONNX_API code
-    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
         cv::Mat roi_mat;
         preprocess(in_mat1(rois[i]), roi_mat);
-        infer<float>(roi_mat, out_onnx[i]);
+        infer<float>(roi_mat, out_onnx);
     }
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
@@ -832,11 +904,10 @@ TEST_F(ONNXMediaFrameTest, InferList2YUV)
     // ONNX_API code
     cv::Mat pp;
     cvtColorTwoPlane(m_in_y, m_in_uv, pp, cv::COLOR_YUV2BGR_NV12);
-    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
         cv::Mat roi_mat;
         preprocess(pp(rois[i]), roi_mat);
-        infer<float>(roi_mat, out_onnx[i]);
+        infer<float>(roi_mat, out_onnx);
     }
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
@@ -917,6 +988,32 @@ TEST_F(ONNXYoloV3MultiInput, InferBSConstInput)
     // Validate
     validate();
 }
 
+TEST_F(ONNXRCNN, ConversionInt64to32)
+{
+    useModel("object_detection_segmentation/faster-rcnn/model/FasterRCNN-10");
+    cv::Mat dst;
+    preprocess(in_mat1, dst);
+    // ONNX_API code
+    infer<float>(dst, out_onnx);
+    // G_API code
+    using FRCNNOUT = std::tuple<cv::GMat,cv::GMat,cv::GMat>;
+    G_API_NET(FasterRCNN, <FRCNNOUT(cv::GMat)>, "FasterRCNN");
+    auto net = cv::gapi::onnx::Params<FasterRCNN>{model_path}
+        .cfgOutputLayers({"out1", "out2", "out3"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {7,4}},
+                      cv::GMatDesc{CV_32S, {7}},
+                      cv::GMatDesc{CV_32F, {7}}}, remapRCNNPorts);
+    cv::GMat in, out1, out2, out3;
+    std::tie(out1, out2, out3) = cv::gapi::infer<FasterRCNN>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3));
+    out_gapi.resize(num_out);
+    comp.apply(cv::gin(dst),
+               cv::gout(out_gapi[0], out_gapi[1], out_gapi[2]),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
 } // namespace opencv_test
 
 #endif // HAVE_ONNX