diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/modules/gapi/src/backends/onnx/gonnxbackend.cpp
index c81e032969..7ab386ecab 100644
--- a/modules/gapi/src/backends/onnx/gonnxbackend.cpp
+++ b/modules/gapi/src/backends/onnx/gonnxbackend.cpp
@@ -167,8 +167,16 @@ inline void preprocess(const cv::Mat& src,
         // No layout or dimension transformations done here!
         // TODO: This needs to be aligned across all NN backends.
         GAPI_Assert(toCV(ti.type) == CV_32F && "Only 32F model input is supported for 32F data");
-        GAPI_Assert(toORT(src.size) == ti.dims && "32F tensor dimensions should match with NN input");
-        GAPI_Assert(!ti.is_dynamic && "Dynamic inputs are not supported for this case");
+        const auto tensor_dims = toORT(src.size);
+        if (tensor_dims.size() == ti.dims.size()) {
+            for (size_t i = 0; i < ti.dims.size(); ++i) {
+                GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) &&
+                            "32F tensor dimensions should match with all non-dynamic NN input dimensions");
+            }
+        } else {
+            GAPI_Assert(false && "32F tensor size should match with NN input");
+        }
+
         dst = src;
     } else {
         // 8U input: full preprocessing path
diff --git a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp
index ebb8020e9a..782e1b093a 100644
--- a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp
+++ b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp
@@ -12,29 +12,26 @@
 #include
 #include
+#include
 #include
 
 namespace {
-
 struct ONNXInitPath {
     ONNXInitPath() {
        const char* env_path = getenv("OPENCV_GAPI_ONNX_MODEL_PATH");
-       if (env_path)
+       if (env_path) {
            cvtest::addDataSearchPath(env_path);
+       }
     }
 };
 static ONNXInitPath g_init_path;
 
-cv::Mat initMatrixRandU(int type, cv::Size sz_in)
-{
-    cv::Mat in_mat1 = cv::Mat(sz_in, type);
+cv::Mat initMatrixRandU(const int type, const cv::Size& sz_in) {
+    const cv::Mat in_mat1 = cv::Mat(sz_in, type);
 
-    if (CV_MAT_DEPTH(type) < CV_32F)
-    {
+    if (CV_MAT_DEPTH(type) < CV_32F) {
         cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
-    }
-    else
-    {
+    } else {
         const int fscale = 256; // avoid bits near ULP, generate stable test input
         cv::Mat in_mat32s(in_mat1.size(), CV_MAKE_TYPE(CV_32S, CV_MAT_CN(type)));
         cv::randu(in_mat32s, cv::Scalar::all(0), cv::Scalar::all(255 * fscale));
@@ -42,111 +39,238 @@ cv::Mat initMatrixRandU(int type, cv::Size sz_in)
     }
     return in_mat1;
 }
-}
+} // anonymous namespace
 
 namespace opencv_test {
 namespace {
 // FIXME: taken from the DNN module
-void normAssert(cv::InputArray ref, cv::InputArray test,
+void normAssert(const cv::InputArray& ref, const cv::InputArray& test,
                 const char *comment /*= ""*/,
-                double l1 = 0.00001, double lInf = 0.0001)
-{
-    double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
+                const double l1 = 0.00001, const double lInf = 0.0001) {
+    const double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
     EXPECT_LE(normL1, l1) << comment;
-    double normInf = cvtest::norm(ref, test, cv::NORM_INF);
+    const double normInf = cvtest::norm(ref, test, cv::NORM_INF);
     EXPECT_LE(normInf, lInf) << comment;
 }
 
-std::string findModel(const std::string &model_name)
-{
-    return findDataFile("vision/classification/squeezenet/model/" + model_name + ".onnx", false);
+inline std::string findModel(const std::string &model_name) {
+    return findDataFile("vision/" + model_name + ".onnx", false);
 }
 
-inline void preprocess(const cv::Mat& src,
-                       cv::Mat& dst,
-                       const cv::Scalar& mean,
-                       const cv::Scalar& std) {
-    int new_h = 224;
-    int new_w = 224;
-    cv::Mat tmp, nmat, cvt;
-    cv::resize(src, dst, cv::Size(new_w, new_h));
-    dst.convertTo(cvt, CV_32F, 1.f / 255);
-    nmat = cvt - mean;
-    tmp = nmat / std;
-    dst.create(cv::Size(new_w, new_h * src.channels()), CV_32F);
+inline void toCHW(const cv::Mat& src, cv::Mat& dst) {
+    dst.create(cv::Size(src.cols, src.rows * src.channels()), CV_32F);
     std::vector<cv::Mat> planes;
     for (int i = 0; i < src.channels(); ++i) {
-        planes.push_back(dst.rowRange(i * new_h, (i + 1) * new_h));
+        planes.push_back(dst.rowRange(i * src.rows, (i + 1) * src.rows));
     }
-    cv::split(tmp, planes);
+    cv::split(src, planes);
 }
 
-void InferONNX(const std::string& model_path,
-               const cv::Mat& in,
-               cv::Mat& out,
-               const cv::Scalar& mean,
-               const cv::Scalar& std)
-{
-    // FIXME: It must be a FIXTURE test!
-    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");
-    Ort::SessionOptions session_options;
-    Ort::Session session(env, model_path.data(), session_options);
-    auto input_node_dims =  // 0 - one input
-        session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
-    auto output_node_dims = // 0 - one output
-        session.GetOutputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
+inline int toCV(const ONNXTensorElementDataType prec) {
+    switch (prec) {
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U;
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F;
+    default: GAPI_Assert(false && "Unsupported data type");
+    }
+    return -1;
+}
+
+inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
+    return cv::to_own<int64_t>(sz);
+}
+
+inline std::vector<const char*> getCharNames(const std::vector<std::string>& names) {
+    std::vector<const char*> out_vec;
+    for (const auto& el : names) {
+        out_vec.push_back(el.data());
+    }
+    return out_vec;
+}
+
+inline void copyToOut(const cv::Mat& in, cv::Mat& out) {
+    GAPI_Assert(in.depth() == CV_32F);
+    GAPI_Assert(in.size == out.size);
+    const float* const inptr = in.ptr<float>();
+    float* const optr = out.ptr<float>();
+    const int size = in.total();
+    for (int i = 0; i < size; ++i) {
+        optr[i] = inptr[i];
+    }
+}
+
+void remapYolo(const std::unordered_map<std::string, cv::Mat> &onnx,
+               std::unordered_map<std::string, cv::Mat> &gapi) {
+    GAPI_Assert(onnx.size() == 1u);
+    GAPI_Assert(gapi.size() == 1u);
+    // Result from Run method
+    const cv::Mat& in = onnx.begin()->second;
+    // Configured output
+    cv::Mat& out = gapi.begin()->second;
+    // Simple copy
+    copyToOut(in, out);
+}
+
+void remapSsdPorts(const std::unordered_map<std::string, cv::Mat> &onnx,
+                   std::unordered_map<std::string, cv::Mat> &gapi) {
+    // Result from Run method
+    const cv::Mat& in_num = onnx.at("num_detections:0");
+    const cv::Mat& in_boxes = onnx.at("detection_boxes:0");
+    const cv::Mat& in_scores = onnx.at("detection_scores:0");
+    const cv::Mat& in_classes = onnx.at("detection_classes:0");
+    // Configured outputs
+    cv::Mat& out_boxes = gapi.at("out1");
+    cv::Mat& out_classes = gapi.at("out2");
+    cv::Mat& out_scores = gapi.at("out3");
+    cv::Mat& out_num = gapi.at("out4");
+    // Simple copy for outputs
+    copyToOut(in_num, out_num);
+    copyToOut(in_boxes, out_boxes);
+    copyToOut(in_scores, out_scores);
+    copyToOut(in_classes, out_classes);
+}
+
+class ONNXtest : public ::testing::Test {
+public:
+    std::string model_path;
+    size_t num_in, num_out;
+    std::vector<cv::Mat> out_gapi;
+    std::vector<cv::Mat> out_onnx;
+    cv::Mat in_mat1;
+
+    ONNXtest() {
+        env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test");
+        memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+        out_gapi.resize(1);
+        out_onnx.resize(1);
+        // FIXME: All tests check "random" image
+        // Ideally it should be a real image
+        in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
+    }
+
+    template<typename T>
+    void infer(const std::vector<cv::Mat>& ins,
+               std::vector<cv::Mat>& outs) {
+        // Prepare session
+        session = Ort::Session(env, model_path.data(), session_options);
+        num_in = session.GetInputCount();
+        num_out = session.GetOutputCount();
+        GAPI_Assert(num_in == ins.size());
+        in_node_names.clear();
+        out_node_names.clear();
+        // Inputs Run params
+        std::vector<Ort::Value> in_tensors;
+        for (size_t i = 0; i < num_in; ++i) {
+            char* in_node_name_p = session.GetInputName(i, allocator);
+            in_node_names.push_back(std::string(in_node_name_p));
+            allocator.Free(in_node_name_p);
+            in_node_dims = toORT(ins[i].size);
+            in_tensors.emplace_back(Ort::Value::CreateTensor<T>(memory_info,
+                                                                const_cast<T*>(ins[i].ptr<T>()),
+                                                                ins[i].total(),
+                                                                in_node_dims.data(),
+                                                                in_node_dims.size()));
+        }
+        // Outputs Run params
+        for (size_t i = 0; i < num_out; ++i) {
+            char* out_node_name_p = session.GetOutputName(i, allocator);
+            out_node_names.push_back(std::string(out_node_name_p));
+            allocator.Free(out_node_name_p);
+        }
+        // Input/output order by names
+        const auto in_run_names = getCharNames(in_node_names);
+        const auto out_run_names = getCharNames(out_node_names);
+        // Run
+        auto result = session.Run(Ort::RunOptions{nullptr},
+                                  in_run_names.data(),
+                                  &in_tensors.front(),
+                                  num_in,
+                                  out_run_names.data(),
+                                  num_out);
+        // Copy outputs
+        GAPI_Assert(result.size() == num_out);
+        outs.resize(num_out);
+        for (size_t i = 0; i < num_out; ++i) {
+            const auto info = result[i].GetTensorTypeAndShapeInfo();
+            const auto shape = info.GetShape();
+            const auto type = info.GetElementType();
+            cv::Mat mt(std::vector<int>(shape.begin(), shape.end()), toCV(type),
+                       reinterpret_cast<void*>(result[i].GetTensorMutableData<uint8_t>()));
+            mt.copyTo(outs[i]);
+        }
+    }
+    // One input/output overload
+    template<typename T>
+    void infer(const cv::Mat& in, cv::Mat& out) {
+        std::vector<cv::Mat> result;
+        infer<T>({in}, result);
+        GAPI_Assert(result.size() == 1u);
+        out = result.front();
+    }
+
+    void validate() {
+        GAPI_Assert(!out_gapi.empty() && !out_onnx.empty());
+        ASSERT_EQ(out_gapi.size(), out_onnx.size());
+        const auto size = out_gapi.size();
+        for (size_t i = 0; i < size; ++i) {
+            normAssert(out_onnx[i], out_gapi[i], "Test outputs");
+        }
+    }
+
+    void useModel(const std::string& model_name) {
+        model_path = findModel(model_name);
+    }
+private:
+    Ort::Env env{nullptr};
+    Ort::MemoryInfo memory_info{nullptr};
     Ort::AllocatorWithDefaultOptions allocator;
-    char* in_node_name_p = session.GetInputName(0, allocator);
-    char* out_node_name_p = session.GetOutputName(0, allocator);
-    std::string in_node_name(in_node_name_p);
-    std::string out_node_name(out_node_name_p);
-    allocator.Free(in_node_name_p);
-    allocator.Free(out_node_name_p);
+    Ort::SessionOptions session_options;
+    Ort::Session session{nullptr};
 
-    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
-    cv::Mat dst;
-    preprocess(in, dst, mean, std);
+    std::vector<int64_t> in_node_dims;
+    std::vector<std::string> in_node_names;
+    std::vector<std::string> out_node_names;
+};
 
-    out.create(std::vector<int>(output_node_dims.begin(),
-                                output_node_dims.end()), CV_32F); // empty output Mat
-    auto in_tensor = Ort::Value::CreateTensor<float>(memory_info,
-                                                     dst.ptr<float>(),
-                                                     dst.total(),
-                                                     input_node_dims.data(),
-                                                     input_node_dims.size());
-    auto out_tensor = Ort::Value::CreateTensor<float>(memory_info,
-                                                      out.ptr<float>(),
-                                                      out.total(),
-                                                      output_node_dims.data(),
-                                                      output_node_dims.size());
-    std::vector<const char*> in_names = {in_node_name.data()};
-    std::vector<const char*> out_names = {out_node_name.data()};
-    session.Run(Ort::RunOptions{nullptr},
-                in_names.data(),
-                &in_tensor,
-                session.GetInputCount(),
-                out_names.data(),
-                &out_tensor,
-                session.GetOutputCount());
-}
+class ONNXClassificationTest : public ONNXtest {
+public:
+    const cv::Scalar mean = { 0.485, 0.456, 0.406 };
+    const cv::Scalar std = { 0.229, 0.224, 0.225 };
+    void preprocess(const cv::Mat& src, cv::Mat& dst) {
+        const int new_h = 224;
+        const int new_w = 224;
+        cv::Mat tmp, cvt, rsz;
+        cv::resize(src, rsz, cv::Size(new_w, new_h));
+        rsz.convertTo(cvt, CV_32F, 1.f / 255);
+        tmp = (cvt - mean) / std;
+        toCHW(tmp, dst);
+        dst = dst.reshape(1, {1, 3, new_h, new_w});
+    }
+};
+
+class ONNXGRayScaleTest : public ONNXtest {
+public:
+    void preprocess(const cv::Mat& src, cv::Mat& dst) {
+        const int new_h = 64;
+        const int new_w = 64;
+        cv::Mat cvc, rsz, cvt;
+        cv::cvtColor(src, cvc, cv::COLOR_BGR2GRAY);
+        cv::resize(cvc, rsz, cv::Size(new_w, new_h));
+        rsz.convertTo(cvt, CV_32F);
+        toCHW(cvt, dst);
+        dst = dst.reshape(1, {1, 1, new_h, new_w});
+    }
+};
 } // anonymous namespace
 
-TEST(ONNX, Infer)
+TEST_F(ONNXClassificationTest, Infer)
 {
-    cv::Mat in_mat1, out_gapi, out_onnx;
-    std::string model_path = findModel("squeezenet1.0-9");
-    // NOTE: All tests chek "random" image
-    // Ideally it should be a real image
-    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
-
-    cv::Scalar mean = { 0.485, 0.456, 0.406 };
-    cv::Scalar std = { 0.229, 0.224, 0.225 };
-
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
     // ONNX_API code
-    InferONNX(model_path, in_mat1, out_onnx, mean, std);
-
+    cv::Mat processed_mat;
+    preprocess(in_mat1, processed_mat);
+    infer<float>(processed_mat, out_onnx.front());
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
@@ -154,125 +278,196 @@ TEST(ONNX, Infer)
     cv::GComputation comp(cv::GIn(in), cv::GOut(out));
     // NOTE: We have to normalize U8 tensor
     // so cfgMeanStd() is here
-    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({ mean }, { std });
     comp.apply(cv::gin(in_mat1),
-               cv::gout(out_gapi),
+               cv::gout(out_gapi.front()),
               cv::compile_args(cv::gapi::networks(net)));
-    // Validate
-    ASSERT_EQ(1000u, out_onnx.total());
-    ASSERT_EQ(1000u, out_gapi.total());
-    normAssert(out_onnx, out_gapi, "Test classification output");
+    validate();
 }
 
-TEST(ONNX, InferROI)
+TEST_F(ONNXtest, InferTensor)
 {
-    cv::Mat in_mat1, out_gapi, out_onnx;
-    std::string model_path = findModel("squeezenet1.0-9");
-    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
-
-    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean
-    cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std
-
-    cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250});
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    // Create tensor
+    // FIXME: Test checks "random" image
+    // Ideally it should be a real image
+    const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{224, 224});
+    const std::vector<int> dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols};
+    const cv::Mat tensor(dims, CV_32F, rand_mat.data);
     // ONNX_API code
-    InferONNX(model_path, in_mat1(ROI), out_onnx, mean, std);
+    infer<float>(tensor, out_onnx.front());
+    // G_API code
+    G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
+    cv::GMat in;
+    cv::GMat out = cv::gapi::infer<SqueezNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path };
+    comp.apply(cv::gin(tensor),
+               cv::gout(out_gapi.front()),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
+TEST_F(ONNXClassificationTest, InferROI)
+{
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    const cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250});
+    // ONNX_API code
+    cv::Mat roi_mat;
+    preprocess(in_mat1(ROI), roi_mat);
+    infer<float>(roi_mat, out_onnx.front());
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
     cv::GOpaque<cv::Rect> rect;
     cv::GMat out = cv::gapi::infer<SqueezNet>(rect, in);
     cv::GComputation comp(cv::GIn(in, rect), cv::GOut(out));
-    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    // NOTE: We have to normalize U8 tensor
+    // so cfgMeanStd() is here
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({ mean }, { std });
     comp.apply(cv::gin(in_mat1, ROI),
-               cv::gout(out_gapi),
+               cv::gout(out_gapi.front()),
               cv::compile_args(cv::gapi::networks(net)));
-    // Validate
-    ASSERT_EQ(1000u, out_onnx.total());
-    ASSERT_EQ(1000u, out_gapi.total());
-    normAssert(out_onnx, out_gapi, "Test classification output");
+    validate();
 }
 
-TEST(ONNX, InferROIList)
+TEST_F(ONNXClassificationTest, InferROIList)
 {
-    cv::Mat in_mat1;
-    std::string model_path = findModel("squeezenet1.0-9");
-    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
-
-    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean
-    cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std
-
-    std::vector<cv::Rect> rois = {
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    const std::vector<cv::Rect> rois = {
         cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}),
         cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
     };
-    std::vector<cv::Mat> out_gapi;
-    std::vector<cv::Mat> out_onnx(rois.size());
     // ONNX_API code
+    out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
-        InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std);
+        cv::Mat roi_mat;
+        preprocess(in_mat1(rois[i]), roi_mat);
+        infer<float>(roi_mat, out_onnx[i]);
     }
-
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
     cv::GArray<cv::Rect> rr;
     cv::GArray<cv::GMat> out = cv::gapi::infer<SqueezNet>(rr, in);
     cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out));
-    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    // NOTE: We have to normalize U8 tensor
+    // so cfgMeanStd() is here
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({ mean }, { std });
     comp.apply(cv::gin(in_mat1, rois),
                cv::gout(out_gapi),
                cv::compile_args(cv::gapi::networks(net)));
-    // Validate
-    for (size_t i = 0; i < rois.size(); ++i) {
-        ASSERT_EQ(1000u, out_onnx[i].total());
-        ASSERT_EQ(1000u, out_gapi[i].total());
-        normAssert(out_onnx[i], out_gapi[i], "Test classification output");
-    }
+    validate();
 }
 
-TEST(ONNX, Infer2ROIList)
+TEST_F(ONNXClassificationTest, Infer2ROIList)
 {
-    cv::Mat in_mat1;
-    std::string model_path = findModel("squeezenet1.0-9");
-    in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
-
-    cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean
-    cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std
-
-    std::vector<cv::Rect> rois = {
+    useModel("classification/squeezenet/model/squeezenet1.0-9");
+    const std::vector<cv::Rect> rois = {
         cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}),
         cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
     };
-    std::vector<cv::Mat> out_gapi;
-    std::vector<cv::Mat> out_onnx(rois.size());
     // ONNX_API code
+    out_onnx.resize(rois.size());
    for (size_t i = 0; i < rois.size(); ++i) {
-        InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std);
+        cv::Mat roi_mat;
+        preprocess(in_mat1(rois[i]), roi_mat);
+        infer<float>(roi_mat, out_onnx[i]);
     }
-
     // G_API code
     G_API_NET(SqueezNet, <cv::GMat(cv::GMat)>, "squeeznet");
     cv::GMat in;
     cv::GArray<cv::Rect> rr;
-    cv::GArray<cv::GMat> out = cv::gapi::infer2<SqueezNet>(in,rr);
+    cv::GArray<cv::GMat> out = cv::gapi::infer2<SqueezNet>(in, rr);
     cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out));
-    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({mean},{std});
+    // NOTE: We have to normalize U8 tensor
+    // so cfgMeanStd() is here
+    auto net = cv::gapi::onnx::Params<SqueezNet> { model_path }.cfgMeanStd({ mean }, { std });
     comp.apply(cv::gin(in_mat1, rois),
                cv::gout(out_gapi),
                cv::compile_args(cv::gapi::networks(net)));
-    // Validate
-    for (size_t i = 0; i < rois.size(); ++i) {
-        ASSERT_EQ(1000u, out_onnx[i].total());
-        ASSERT_EQ(1000u, out_gapi[i].total());
-        normAssert(out_onnx[i], out_gapi[i], "Test classification output");
-    }
+    validate();
 }
+TEST_F(ONNXtest, InferDynamicInputTensor)
+{
+    useModel("object_detection_segmentation/tiny-yolov2/model/tinyyolov2-8");
+    // Create tensor
+    // FIXME: Test checks "random" image
+    // Ideally it should be a real image
+    const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{416, 416});
+    const std::vector<int> dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols};
+    cv::Mat tensor(dims, CV_32F, rand_mat.data);
+    const cv::Mat in_tensor = tensor / 255.f;
+    // ONNX_API code
+    infer<float>(in_tensor, out_onnx.front());
+    // G_API code
+    G_API_NET(YoloNet, <cv::GMat(cv::GMat)>, "YoloNet");
+    cv::GMat in;
+    cv::GMat out = cv::gapi::infer<YoloNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<YoloNet>{model_path}
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 125, 13, 13}}}, remapYolo)
+        .cfgOutputLayers({"out"});
+    comp.apply(cv::gin(in_tensor),
+               cv::gout(out_gapi.front()),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
+
+TEST_F(ONNXGRayScaleTest, InferImage)
+{
+    useModel("body_analysis/emotion_ferplus/model/emotion-ferplus-8");
+    // ONNX_API code
+    cv::Mat prep_mat;
+    preprocess(in_mat1, prep_mat);
+    infer<float>(prep_mat, out_onnx.front());
+    // G_API code
+    G_API_NET(EmotionNet, <cv::GMat(cv::GMat)>, "emotion-ferplus");
+    cv::GMat in;
+    cv::GMat out = cv::gapi::infer<EmotionNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<EmotionNet> { model_path }
+        .cfgNormalize({ false }); // model accepts 0..255 range in FP32
+    comp.apply(cv::gin(in_mat1),
+               cv::gout(out_gapi.front()),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
+
+TEST_F(ONNXtest, InferMultOutput)
+{
+    useModel("object_detection_segmentation/ssd-mobilenetv1/model/ssd_mobilenet_v1_10");
+    // ONNX_API code
+    const auto prep_mat = in_mat1.reshape(1, {1, in_mat1.rows, in_mat1.cols, in_mat1.channels()});
+    infer<uint8_t>({prep_mat}, out_onnx);
+    // G_API code
+    using SSDOut = std::tuple<cv::GMat, cv::GMat, cv::GMat, cv::GMat>;
+    G_API_NET(MobileNet, <SSDOut(cv::GMat)>, "ssd_mobilenet");
+    cv::GMat in;
+    cv::GMat out1, out2, out3, out4;
+    std::tie(out1, out2, out3, out4) = cv::gapi::infer<MobileNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3, out4));
+    auto net = cv::gapi::onnx::Params<MobileNet>{model_path}
+        .cfgOutputLayers({"out1", "out2", "out3", "out4"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 100, 4}},
+                      cv::GMatDesc{CV_32F, {1, 100}},
+                      cv::GMatDesc{CV_32F, {1, 100}},
+                      cv::GMatDesc{CV_32F, {1, 1}}}, remapSsdPorts);
+    out_gapi.resize(num_out);
+    comp.apply(cv::gin(in_mat1),
+               cv::gout(out_gapi[0], out_gapi[1], out_gapi[2], out_gapi[3]),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
 
 } // namespace opencv_test
 
 #endif // HAVE_ONNX
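
Reviewer note (not part of the patch): the sketch below is a minimal, self-contained restatement of the relaxed shape check introduced in the gonnxbackend.cpp hunk above, assuming (as that code does) that a model dimension of -1 marks a dynamic extent. The helper name dims_compatible is hypothetical and does not exist in the codebase; it only illustrates the rule the new asserts enforce: ranks must agree, and every non-dynamic model dimension must equal the corresponding tensor dimension.

// Hypothetical helper illustrating the relaxed 32F-tensor check from the patch.
#include <cstdint>
#include <vector>

static bool dims_compatible(const std::vector<int64_t>& model_dims,
                            const std::vector<int64_t>& tensor_dims)
{
    if (model_dims.size() != tensor_dims.size()) return false;  // rank mismatch
    for (size_t i = 0; i < model_dims.size(); ++i) {
        // A model dimension of -1 is dynamic and accepts any tensor extent.
        if (model_dims[i] != -1 && model_dims[i] != tensor_dims[i]) {
            return false;                                       // fixed dimension mismatch
        }
    }
    return true;
}

With such a predicate, the backend's previous behaviour (exact match required, dynamic inputs rejected) becomes "assert the tensor is compatible", which is what the InferDynamicInputTensor test above exercises end-to-end.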