|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#include "precomp.hpp"
|
|
|
|
|
|
|
|
|
|
|
|
#include "backends/ie/giebackend.hpp"
|
|
|
|
|
|
#if defined HAVE_INF_ENGINE && INF_ENGINE_RELEASE < 2023010000
|
|
|
|
|
|
#if INF_ENGINE_RELEASE <= 2019010000
|
|
|
# error G-API IE module supports only OpenVINO IE >= 2019 R1
|
|
|
#endif
|
|
|
|
|
|
#include <functional>
|
|
|
#include <unordered_set>
|
|
|
#include <atomic>
|
|
|
#include <tuple>
|
|
|
|
|
|
|
|
|
#include <ade/util/algorithm.hpp>
|
|
|
|
|
|
#include <ade/util/range.hpp>
|
|
|
#include <ade/util/zip_range.hpp>
|
|
|
#include <ade/util/chain_range.hpp>
|
|
|
#include <ade/typed_graph.hpp>
|
|
|
|
|
|
#include <opencv2/core/utility.hpp>
|
|
|
#include <opencv2/core/utils/logger.hpp>
|
|
|
|
|
|
#include <opencv2/gapi/gcommon.hpp>
|
|
|
#include <opencv2/gapi/garray.hpp>
|
|
|
#include <opencv2/gapi/gopaque.hpp>
|
|
|
#include <opencv2/gapi/util/any.hpp>
|
|
|
#include <opencv2/gapi/gtype_traits.hpp>
|
|
|
#include <opencv2/gapi/infer.hpp>
|
|
|
#include <opencv2/gapi/own/convert.hpp>
|
|
|
#include <opencv2/gapi/gframe.hpp>
|
|
|
|
|
|
#include "compiler/gobjref.hpp"
|
|
|
#include "compiler/gmodel.hpp"
|
|
|
|
|
|
#include "backends/ie/util.hpp"
|
|
|
#include "backends/ie/giebackend/giewrapper.hpp"
|
|
|
|
|
|
#include "api/gbackend_priv.hpp"
|
|
|
#include "logger.hpp"
|
|
|
|
|
|
#if INF_ENGINE_RELEASE < 2021010000
|
|
|
#include "ie_compound_blob.h"
|
|
|
#endif
|
|
|
|
|
|
#if defined(HAVE_TBB)
|
|
|
# include <tbb/concurrent_queue.h>
|
|
|
template<typename T> using QueueClass = tbb::concurrent_bounded_queue<T>;
|
|
|
#else
|
|
|
# include "executor/conc_queue.hpp"
|
|
|
template<typename T> using QueueClass = cv::gapi::own::concurrent_bounded_queue<T>;
|
|
|
#endif
|
|
|
|
|
|
#include "utils/itt.hpp"
|
|
|
|
|
|
#include "streaming/onevpl/engine/preproc_engine_interface.hpp"
|
|
|
#include "streaming/onevpl/engine/preproc/preproc_dispatcher.hpp"
|
|
|
|
|
|
namespace IE = InferenceEngine;
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
// Translates a textual layout name (e.g. "NCHW") into the corresponding
// IE::Layout enumerator.
// Throws std::logic_error for layout strings the IE backend does not support.
IE::Layout toIE(const std::string &layout) {
    // NB: The table is immutable - build it once instead of on every call.
    static const std::unordered_map<std::string, IE::Layout> layouts = {
        {"NCDHW", IE::Layout::NCDHW},
        {"NDHWC", IE::Layout::NDHWC},
        {"NHWC" , IE::Layout::NHWC },
        {"NCHW" , IE::Layout::NCHW },
        {"CHW"  , IE::Layout::CHW  },
        {"HWC"  , IE::Layout::HWC  },
        {"HW"   , IE::Layout::HW   },
        {"NC"   , IE::Layout::NC   },
        {"CN"   , IE::Layout::CN   },
        {"C"    , IE::Layout::C    },
    };

    const auto it = layouts.find(layout);
    if (it == layouts.end()) {
        cv::util::throw_error(
            std::logic_error("IE Backend: Unsupported layout: " + layout));
    }
    return it->second;
}
|
|
|
|
|
|
// Converts an OpenCV rectangle into an IE ROI.
// The ROI's batch id is always 0 (single-image semantics).
inline IE::ROI toIE(const cv::Rect &rc) {
    IE::ROI roi;
    roi.id    = 0u;
    roi.posX  = static_cast<std::size_t>(rc.x);
    roi.posY  = static_cast<std::size_t>(rc.y);
    roi.sizeX = static_cast<std::size_t>(rc.width);
    roi.sizeY = static_cast<std::size_t>(rc.height);
    return roi;
}
|
|
|
|
|
|
// Converts a cv::MatSize into an IE dimensions vector (element-wise copy
// into IE::SizeVector's value type via G-API's own conversion helper).
inline IE::SizeVector toIE(const cv::MatSize &sz) {
    return cv::to_own<IE::SizeVector::value_type>(sz);
}
|
|
|
// Converts IE dimensions (unsigned) into an OpenCV dims vector (int),
// with a checked narrowing cast on every element.
inline std::vector<int> toCV(const IE::SizeVector &vsz) {
    std::vector<int> dims(vsz.size());
    for (std::size_t i = 0u; i < vsz.size(); ++i) {
        dims[i] = ade::util::checked_cast<int>(vsz[i]);
    }
    return dims;
}
|
|
|
|
|
|
// Maps a tensor rank (number of dimensions) to the default IE layout for
// that rank: 0->SCALAR, 1->C, 2->NC, 3->CHW, 4->NCHW, 5->NCDHW.
// Ranks above 5 are rejected via CV_Assert.
inline IE::Layout toIELayout(const std::size_t ndims) {
    static const IE::Layout lts[] = {
        IE::Layout::SCALAR,
        IE::Layout::C,
        IE::Layout::NC,
        IE::Layout::CHW,
        IE::Layout::NCHW,
        IE::Layout::NCDHW,
        // FIXME: ...any other layouts?
    };
    CV_Assert(ndims < sizeof(lts) / sizeof(lts[0]));
    return lts[ndims];
}
|
|
|
|
|
|
// Maps an OpenCV element depth to the matching IE precision.
// Only U8/I32/FP32/FP16 are supported; anything else raises GAPI_Error.
inline IE::Precision toIE(int depth) {
    if (depth == CV_8U)  return IE::Precision::U8;
    if (depth == CV_32S) return IE::Precision::I32;
    if (depth == CV_32F) return IE::Precision::FP32;
    if (depth == CV_16F) return IE::Precision::FP16;
    GAPI_Error("IE. Unsupported data type");
    return IE::Precision::UNSPECIFIED;  // not reached: GAPI_Error throws
}
|
|
|
// Maps an IE precision back to an OpenCV depth.
// NB: both I32 and I64 map to CV_32S - cv::Mat has no 64-bit integer type
// here, so I64 blobs are later down-converted (see copyFromIE).
inline int toCV(IE::Precision prec) {
    if (prec == IE::Precision::U8)   return CV_8U;
    if (prec == IE::Precision::FP32) return CV_32F;
    if (prec == IE::Precision::I32)  return CV_32S;
    if (prec == IE::Precision::I64)  return CV_32S;
    if (prec == IE::Precision::FP16) return CV_16F;
    GAPI_Error("IE. Unsupported data type");
    return -1;  // not reached: GAPI_Error throws
}
|
|
|
|
|
|
// Maps an OpenCV interpolation flag onto the IE resize algorithm used by
// the plugin-side preprocessing. Only bilinear and area are supported.
inline IE::ResizeAlgorithm toIEInterp(int interpolation) {
    if (interpolation == cv::INTER_LINEAR) {
        return IE::RESIZE_BILINEAR;
    }
    if (interpolation == cv::INTER_AREA) {
        return IE::RESIZE_AREA;
    }
    GAPI_Error("IE Backend: Unsupported resize algorithm");
    GAPI_Assert(false);  // not reached: GAPI_Error throws
}
|
|
|
|
|
|
// Short aliases for the per-layer attribute containers declared by the IE
// params: AttrMap maps a layer name to an attribute value; LayerVariantAttr
// holds either a single attribute (to be broadcast to all layers) or an
// explicit per-layer AttrMap.
template <typename Attr>
using AttrMap = cv::gapi::ie::detail::AttrMap<Attr>;

template <typename Attr>
using LayerVariantAttr = cv::gapi::ie::detail::LayerVariantAttr<Attr>;
|
|
|
|
|
|
// Expands a LayerVariantAttr into an explicit per-layer AttrMap:
//  - if the user provided an explicit map, validate that every referenced
//    layer actually exists in layer_names (throws otherwise);
//  - if the user provided a single value, broadcast it to all layers;
//  - if the variant is empty, return an empty map.
template <typename Attr> AttrMap<Attr>
broadcastLayerAttr(const LayerVariantAttr<Attr> &layer_attr,
                   const std::vector<std::string> &layer_names) {
    AttrMap<Attr> result;
    if (cv::util::holds_alternative<AttrMap<Attr>>(layer_attr)) {
        result = cv::util::get<AttrMap<Attr>>(layer_attr);
        // Validate that the provided map doesn't reference unknown layers.
        const std::unordered_set<std::string> known_layers =
            {layer_names.begin(), layer_names.end()};
        for (const auto &entry : result) {
            if (known_layers.count(entry.first) == 0u) {
                cv::util::throw_error(
                    std::logic_error("IE Backend: Failed to"
                                     " find layer with name: " + entry.first));
            }
        }
    } else if (cv::util::holds_alternative<Attr>(layer_attr)) {
        // Broadcast the single value to every layer.
        auto value = cv::util::get<Attr>(layer_attr);
        for (const auto &layer_name : layer_names) {
            result.emplace(layer_name, value);
        }
    }
    return result;
}
|
|
|
|
|
|
|
|
|
template <typename K, typename V>
|
|
|
cv::optional<V> lookUp(const std::map<K, V> &map, const K& key) {
|
|
|
const auto it = map.find(key);
|
|
|
if (it == map.end()) {
|
|
|
return {};
|
|
|
}
|
|
|
return cv::util::make_optional(std::move(it->second));
|
|
|
}
|
|
|
|
|
|
// Heuristic: decide whether a cv::Mat input should be treated as an image
// (as opposed to a raw tensor). An "image" here is a non-ND 8-bit mat with
// 1 or 3 channels and non-degenerate spatial size, fed to a 4D model input.
static bool isImage(const cv::GMatDesc &desc,
                    const IE::SizeVector &model_dims) {
    const bool model_is_4d = (model_dims.size() == 4u);
    const bool planar_2d   = !desc.isND();
    const bool chan_ok     = (desc.chan == 1 || desc.chan == 3);
    const bool size_ok     = (desc.size.height != 1 && desc.size.width != 1);
    const bool depth_ok    = (desc.depth == CV_8U);
    return model_is_4d && planar_2d && chan_ok && size_ok && depth_ok;
}
|
|
|
|
|
|
// Classifies a cv::Mat input as IMAGE or TENSOR based on the isImage()
// heuristic above.
cv::gapi::ie::TraitAs clarifyTrait(const cv::GMatDesc &mat_desc,
                                   const IE::SizeVector &model_dims) {
    return isImage(mat_desc, model_dims) ? cv::gapi::ie::TraitAs::IMAGE
                                         : cv::gapi::ie::TraitAs::TENSOR;
}
|
|
|
|
|
|
// Classifies a generic input meta as IMAGE or TENSOR.
// Media frames are always images; anything else must be a cv::Mat meta and
// is classified by the GMatDesc overload.
cv::gapi::ie::TraitAs clarifyTrait(const cv::GMetaArg &meta,
                                   const IE::SizeVector &model_dims) {
    if (cv::util::holds_alternative<cv::GFrameDesc>(meta)) {
        return cv::gapi::ie::TraitAs::IMAGE;
    }
    GAPI_Assert(cv::util::holds_alternative<cv::GMatDesc>(meta));
    const auto &mat_desc = cv::util::get<cv::GMatDesc>(meta);
    return clarifyTrait(mat_desc, model_dims);
}
|
|
|
|
|
|
// Builds an IE::TensorDesc describing `mat`'s memory.
// For a 2D mat treated as an image, the interleaved HWC buffer is exposed as
// a logical 1xCxHxW tensor via an explicit BlockingDesc (so IE reads the data
// without a physical repack). Otherwise a plain dense descriptor is produced
// with the default layout for the mat's rank.
inline IE::TensorDesc toIE(const cv::Mat &mat, cv::gapi::ie::TraitAs hint) {
    const auto &sz = mat.size;
    if (sz.dims() == 2 && hint == cv::gapi::ie::TraitAs::IMAGE)
    {
        const size_t channels = mat.channels();
        const size_t height = mat.size().height;
        const size_t width = mat.size().width;

        // step1() is the row stride in elements - rows may be padded.
        const size_t strideH = mat.step1();
        // Physical block dims are NHWC-ordered; `order` {0,2,3,1} maps them
        // onto the logical NCHW dims, and the last argument gives per-dim
        // strides in elements.
        IE::BlockingDesc bdesc({1, height, width, channels} ,
                               {0, 2, 3, 1} ,
                               0 ,
                               {0, 0, 0, 0} ,
                               {strideH * height, strideH, channels, 1} );

        return IE::TensorDesc(toIE(mat.depth()),
                              IE::SizeVector{1, channels, height, width}, bdesc);
    }
    // Tensor case: dense descriptor, layout inferred from rank.
    return IE::TensorDesc(toIE(mat.depth()), toIE(sz), toIELayout(sz.dims()));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Permutes user-provided dims (given in the order of `layout`) into the
// canonical channel-first order IE::TensorDesc expects. Layouts without an
// explicit permutation here are passed through unchanged.
inline IE::SizeVector toIEDims(const IE::SizeVector &dims,
                               const IE::Layout layout) {
    if (layout == IE::Layout::NDHWC) {  // -> NCDHW
        return {dims[0], dims[4], dims[1], dims[2], dims[3]};
    }
    if (layout == IE::Layout::NHWC) {   // -> NCHW
        return {dims[0], dims[3], dims[1], dims[2]};
    }
    if (layout == IE::Layout::HWC) {    // -> CHW
        return {dims[2], dims[0], dims[1]};
    }
    return dims;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
inline std::vector<int> toCVDims(const std::vector<int> &dims,
|
|
|
const IE::Layout layout) {
|
|
|
switch (layout) {
|
|
|
case IE::Layout::NDHWC:
|
|
|
return {dims[0], dims[2], dims[3], dims[4], dims[1]};
|
|
|
case IE::Layout::NHWC:
|
|
|
return {dims[0], dims[2], dims[3], dims[1]};
|
|
|
case IE::Layout::HWC:
|
|
|
return {dims[1], dims[2], dims[0]};
|
|
|
default: return dims;
|
|
|
}
|
|
|
GAPI_Assert(false);
|
|
|
}
|
|
|
|
|
|
// Builds an IE::TensorDesc for `mat`, honoring an explicit target layout for
// the tensor case. The 2D-image branch is identical to the two-argument
// overload above: the interleaved HWC buffer is exposed as a logical 1xCxHxW
// tensor via a BlockingDesc with explicit strides.
inline IE::TensorDesc toIE(const cv::Mat &mat,
                           const cv::gapi::ie::TraitAs hint,
                           const IE::Layout layout) {
    const auto &sz = mat.size;
    if (sz.dims() == 2 && hint == cv::gapi::ie::TraitAs::IMAGE)
    {
        const size_t channels = mat.channels();
        const size_t height = mat.size().height;
        const size_t width = mat.size().width;

        // Row stride in elements - rows may be padded.
        const size_t strideH = mat.step1();
        // Physical NHWC block mapped onto logical NCHW dims via order
        // {0,2,3,1}; last argument is the per-dimension element strides.
        IE::BlockingDesc bdesc({1, height, width, channels} ,
                               {0, 2, 3, 1} ,
                               0 ,
                               {0, 0, 0, 0} ,
                               {strideH * height, strideH, channels, 1} );

        return IE::TensorDesc(toIE(mat.depth()),
                              IE::SizeVector{1, channels, height, width}, bdesc);
    }
    // Tensor case: dims are permuted to match the requested layout.
    return IE::TensorDesc(toIE(mat.depth()),
                          toIEDims(toIE(sz), layout),
                          layout);
}
|
|
|
|
|
|
// Wraps a cv::Mat into an IE blob WITHOUT copying: the blob aliases the
// mat's buffer, so the mat must outlive the blob. Supported depths are
// CV_8U/CV_32F/CV_32S/CV_16F; anything else raises GAPI_Error.
inline IE::Blob::Ptr wrapIE(const cv::Mat &mat,
                            cv::gapi::ie::TraitAs hint,
                            const IE::Layout layout = IE::Layout::ANY) {
    const auto tDesc = toIE(mat, hint, layout);
    switch (mat.depth()) {
        // NB: const_cast is needed because make_shared_blob takes T*;
        // IE may write through it only for output blobs.
#define HANDLE(E,T) \
        case CV_##E: return IE::make_shared_blob<T>(tDesc, const_cast<T*>(mat.ptr<T>()))
        HANDLE(8U, uint8_t);
        HANDLE(32F, float);
        HANDLE(32S, int);
        HANDLE(16F, int16_t);
#undef HANDLE
    default: GAPI_Error("IE. Unsupported data type");
    }
    return IE::Blob::Ptr{};
}
|
|
|
|
|
|
// Wraps a mapped MediaFrame view into an IE blob without copying.
// BGR and GRAY frames become ordinary image blobs; NV12 frames go through
// the dedicated Y/UV compound-blob helper. The view must stay alive as long
// as the blob is in use.
inline IE::Blob::Ptr wrapIE(const cv::MediaFrame::View& view,
                            const cv::GFrameDesc& desc) {

    switch (desc.fmt) {
        case cv::MediaFormat::BGR: {
            auto bgr = cv::Mat(desc.size, CV_8UC3, view.ptr[0], view.stride[0]);
            return wrapIE(bgr, cv::gapi::ie::TraitAs::IMAGE);
        }
        case cv::MediaFormat::NV12: {
            auto y_plane  = cv::Mat(desc.size, CV_8UC1, view.ptr[0], view.stride[0]);
            // UV plane is half-resolution, two channels per element.
            auto uv_plane = cv::Mat(desc.size / 2, CV_8UC2, view.ptr[1], view.stride[1]);
            return cv::gapi::ie::util::to_ie(y_plane, uv_plane);
        }
        case cv::MediaFormat::GRAY: {
            auto gray = cv::Mat(desc.size, CV_8UC1, view.ptr[0], view.stride[0]);
            return wrapIE(gray, cv::gapi::ie::TraitAs::IMAGE);
        }
        default:
            GAPI_Error("Unsupported media format for IE backend");
    }
    GAPI_Error("InternalError");
}
|
|
|
|
|
|
// Copies an IE blob's contents into a pre-allocated cv::Mat (or compatible
// type). The destination type must already match the blob's precision (as
// mapped by toCV); otherwise a std::logic_error is thrown. I64 blobs get a
// lossy element-wise down-conversion to I32 with a warning.
template<class MatType>
inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
    const auto& desc = blob->getTensorDesc();
    const auto ie_type = toCV(desc.getPrecision());
    if (ie_type != mat.type()) {
        std::stringstream ss;
        ss << "Failed to copy blob from IE to OCV: "
           << "Blobs have different data types "
           << "(IE type: " << ie_type
           << " vs OCV type: " << mat.type() << ")." << std::endl;
        throw std::logic_error(ss.str());
    }
    switch (blob->getTensorDesc().getPrecision()) {
#define HANDLE(E,T)                                                     \
        case IE::Precision::E: std::copy_n(blob->buffer().as<T*>(),     \
                                           mat.total(),                 \
                                           reinterpret_cast<T*>(mat.data)); \
            break;
        HANDLE(U8, uint8_t);
        HANDLE(FP32, float);
        HANDLE(I32, int);
        HANDLE(FP16, cv::hfloat);
#undef HANDLE
        case IE::Precision::I64: {
            // cv::Mat has no 64-bit integer type; narrow element-wise.
            GAPI_LOG_WARNING(NULL, "INT64 isn't supported for cv::Mat. Conversion to INT32 is used.");
            cv::gimpl::convertInt64ToInt32(blob->buffer().as<int64_t*>(),
                                           reinterpret_cast<int*>(mat.data),
                                           mat.total());
            break;
        }
    default: GAPI_Error("IE. Unsupported data type");
    }
}
|
|
|
|
|
|
// Verifies that every name in layer_names exists in network_map.
// On the first missing name, throws std::logic_error whose message lists all
// layers the network actually has (name, precision, layout) to aid the user.
template <typename MapT>
void checkLayerNames(const MapT& network_map,
                     const std::vector<std::string>& layer_names,
                     const std::string& layer_type) {
    for (const auto& layer_name : layer_names) {
        if (network_map.find(layer_name) != network_map.end()) {
            continue;  // this name is fine - check the next one
        }
        std::stringstream ss;
        ss << "Failed to find " << layer_type << " layer with name: "
           << "\"" << layer_name << "\"" << std::endl;
        ss << "Network " << layer_type << " layers: " << std::endl;
        for (const auto& entry : network_map) {
            const auto& desc = entry.second->getTensorDesc();
            ss << entry.first << " : " << desc.getPrecision()
               << " / " << desc.getLayout() << std::endl;
        }
        throw std::logic_error(ss.str());
    }
}
|
|
|
|
|
|
// Convenience wrapper: validate input layer names against the network map.
template <typename MapT>
void checkInputLayerNames(const MapT& network_map,
                          const std::vector<std::string>& layer_names) {
    checkLayerNames(network_map, layer_names, "input");
}
|
|
|
|
|
|
// Convenience wrapper: validate output layer names against the network map.
template <typename MapT>
void checkOutputLayerNames(const MapT& network_map,
                           const std::vector<std::string>& layer_names) {
    checkLayerNames(network_map, layer_names, "output");
}
|
|
|
|
|
|
|
|
|
// Graph metadata attached to an inference node: holds the user parameters,
// the (possibly read or imported) network, the plugin/executable network,
// an optional remote context and an optional VPP preprocessing engine.
// The constructor validates and normalizes the user-provided ParamDesc.
struct IEUnit {
    static const char *name() { return "IEModelConfig"; }

    cv::gapi::ie::detail::ParamDesc params;
    IE::CNNNetwork net;

    IE::ExecutableNetwork this_network;
    cv::gimpl::ie::wrap::Plugin this_plugin;

    // Remote (e.g. GPU/VA) context, created only when the user passed an
    // InferenceEngine::ParamMap in the context config.
    InferenceEngine::RemoteContext::Ptr rctx = nullptr;

    // Optional oneVPL-based preprocessing engine (see ctor below).
    std::shared_ptr<cv::gapi::wip::IPreprocEngine> preproc_engine_impl;

    // Per-layer plugin-side preprocessing info (resize/color conversion).
    using PreProcMap = std::unordered_map<std::string, IE::PreProcessInfo>;
    PreProcMap preproc_map;

    // Records the frame description (format/size) each network input expects,
    // derived from its tensor descriptor; used by the VPP preproc path.
    class InputFramesDesc {
        using input_name_type = std::string;
        using description_type = cv::GFrameDesc;
        std::map<input_name_type, description_type> map;
    public:
        static bool is_applicable(const cv::GMetaArg &mm);
        const description_type &get_param(const input_name_type &input) const;

        void set_param(const input_name_type &input,
                       const IE::TensorDesc& desc);
    };

    InputFramesDesc net_input_params;
    // IMAGE vs TENSOR trait resolved per input layer at compile time.
    std::unordered_map<std::string, cv::gapi::ie::TraitAs> inputs_type;

    explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp)
        : params(pp) {
        // Create a remote context if the user supplied plugin context params.
        InferenceEngine::ParamMap* ctx_params =
            cv::util::any_cast<InferenceEngine::ParamMap>(&params.context_config);
        if (ctx_params != nullptr) {
            auto ie_core = cv::gimpl::ie::wrap::getCore();
            GAPI_LOG_DEBUG(nullptr, "create IE remote ctx for device id: " << params.device_id);
            rctx = ie_core.CreateContext(params.device_id, *ctx_params);
        }

        // Load path reads the model from disk; Import path loads a
        // pre-compiled blob straight into the plugin.
        if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
            net = cv::gimpl::ie::wrap::readNetwork(params);
            if (params.batch_size.has_value()) {
                net.setBatchSize(params.batch_size.value());
            }
        } else if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import) {
            this_plugin = cv::gimpl::ie::wrap::getPlugin(params);
            this_network = cv::gimpl::ie::wrap::importNetwork(this_plugin, params, rctx);
            // Imported networks are already compiled - reshape is impossible.
            if (!params.reshape_table.empty() || !params.layer_names_to_reshape.empty()) {
                GAPI_LOG_WARNING(NULL, "Reshape isn't supported for imported network");
            }
        } else {
            cv::util::throw_error(std::logic_error("Unsupported ParamDesc::Kind"));
        }

        // Multi-port networks require explicit layer names from the user.
        if (params.num_in > 1u && params.num_in != params.input_names.size()) {
            cv::util::throw_error(std::logic_error("Please specify input layer names for "
                                                   + params.model_path));
        }
        if (params.num_out > 1u && params.num_out != params.output_names.size()) {
            cv::util::throw_error(std::logic_error("Please specify output layer names for "
                                                   + params.model_path));
        }
        // Single-port networks: default to the first (only) layer name.
        if (params.num_in == 1u && params.input_names.empty()) {
            if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
                params.input_names = { net.getInputsInfo().begin()->first };
            } else {
                params.input_names = { this_network.GetInputsInfo().begin()->first };
            }
        }
        if (params.num_out == 1u && params.output_names.empty()) {
            if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
                params.output_names = { net.getOutputsInfo().begin()->first };
            } else {
                params.output_names = { this_network.GetOutputsInfo().begin()->first };
            }
        }
        if (!params.reshape_table.empty()) {
            GAPI_Assert((params.reshape_table.size() + params.layer_names_to_reshape.size()) <=
                        params.num_in &&
                        "Number of layers to reshape must be less than or equal to number of inputs");
        }

        // Validate the (possibly defaulted) layer names against the network.
        if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
            checkInputLayerNames(net.getInputsInfo(), params.input_names);
            checkOutputLayerNames(net.getOutputsInfo(), params.output_names);
        } else if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import) {
            checkInputLayerNames(this_network.GetInputsInfo(), params.input_names);
            checkOutputLayerNames(this_network.GetOutputsInfo(), params.output_names);
        } else {
            cv::util::throw_error(std::logic_error("Unsupported ParamDesc::Kind"));
        }

        if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import &&
            !cv::util::holds_alternative<cv::util::monostate>(params.output_precision)) {
            cv::util::throw_error(
                std::logic_error("Setting output precision isn't supported for imported network"));
        }

        // Optionally spin up the oneVPL VPP preprocessing engine when both
        // the device and the context were configured by the user.
        using namespace cv::gapi::wip::onevpl;
        if (params.vpl_preproc_device.has_value() && params.vpl_preproc_ctx.has_value()) {
            using namespace cv::gapi::wip;
            GAPI_LOG_INFO(nullptr, "VPP preproc creation requested");
            preproc_engine_impl =
                IPreprocEngine::create_preproc_engine<onevpl::VPPPreprocDispatcher>(
                    params.vpl_preproc_device.value(),
                    params.vpl_preproc_ctx.value());
            GAPI_LOG_INFO(nullptr, "VPP preproc created successfuly");
        }

        if (params.mode == cv::gapi::ie::InferMode::Sync &&
            params.nireq != 1u) {
            throw std::logic_error(
                "Failed: cv::gapi::ie::InferMode::Sync works only with nireq equal to 1.");
        }
    }

    // Finalizes the unit into an IECompiled bundle. For the Load path the
    // network is actually loaded into the plugin here (lazily, at island
    // compilation); const_cast is used because this mutation happens behind
    // a const graph-metadata access. NOTE(review): not thread-safe by itself.
    cv::gimpl::ie::IECompiled compile() const {
        IEUnit* non_const_this = const_cast<IEUnit*>(this);
        if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
            non_const_this->this_plugin = cv::gimpl::ie::wrap::getPlugin(params);
            non_const_this->this_network = cv::gimpl::ie::wrap::loadNetwork(non_const_this->this_plugin,
                                                                            net, params, rctx);
        }
        return {params, this_plugin, this_network};
    }
};
|
|
|
|
|
|
// Frame descriptions are only collected for media-frame inputs.
bool IEUnit::InputFramesDesc::is_applicable(const cv::GMetaArg &mm) {
    const bool is_frame = cv::util::holds_alternative<cv::GFrameDesc>(mm);
    return is_frame;
}
|
|
|
|
|
|
// Returns the recorded frame description for a given network input.
// Asserts if set_param() was never called for that input.
const IEUnit::InputFramesDesc::description_type &
IEUnit::InputFramesDesc::get_param(const input_name_type &input) const {
    const auto found = map.find(input);
    GAPI_Assert(found != map.end() && "No appropriate input is found in InputFramesDesc");
    return found->second;
}
|
|
|
|
|
|
// Derives and records the frame description (format + size) a network input
// expects, based on its 4D tensor descriptor. The format is fixed to NV12
// (the VPP preproc target); only NHWC/NCHW layouts are accepted.
void IEUnit::InputFramesDesc::set_param(const input_name_type &input,
                                        const IE::TensorDesc& desc) {
    description_type ret;
    ret.fmt = cv::MediaFormat::NV12;
    const InferenceEngine::SizeVector& inDims = desc.getDims();
    auto layout = desc.getLayout();
    GAPI_LOG_DEBUG(nullptr, "network input: " << input <<
                            ", tensor dims: " << inDims[0] << ", " << inDims[1] <<
                            ", " << inDims[2] << ", " << inDims[3]);
    if (layout != InferenceEngine::NHWC && layout != InferenceEngine::NCHW) {
        GAPI_LOG_WARNING(nullptr, "Unsupported layout for VPP preproc: " << layout <<
                                  ", input name: " << input);
        GAPI_Error("Unsupported layout for VPP preproc");
    }
    GAPI_Assert(inDims.size() == 4u);
    // NB: width/height indices assume NCHW ordering of getDims() here -
    //     dims[3] = W, dims[2] = H. TODO(review): confirm for NHWC inputs.
    ret.size.width = static_cast<int>(inDims[3]);
    ret.size.height = static_cast<int>(inDims[2]);

    // Each input may be registered only once.
    auto res = map.emplace(input, ret);
    GAPI_Assert(res.second && "Duplicated input info in InputFramesDesc are not allowable");
}
|
|
|
|
|
|
// Per-execution context of an inference island: binds input/output objects
// into the magazine, packs kernel arguments, and exposes typed accessors
// used by the actual inference code. One instance lives per island run.
class IECallContext
{
public:
    IECallContext(const IEUnit & unit,
                  cv::gimpl::GIslandExecutable::IOutput & output,
                  const cv::GArgs & args,
                  const std::vector<cv::gimpl::RcDesc> & outs,
                  cv::GRunArg::Meta && meta,
                  std::vector<cv::gimpl::GIslandExecutable::InObj> && input_objs,
                  std::vector<cv::gimpl::GIslandExecutable::OutObj> && output_objs);

    const cv::GArgs& inArgs() const;

    // Generic accessor to the packed argument at position `input`.
    template<typename T>
    const T& inArg(std::size_t input) const {
        return m_args.at(input).get<T>();
    }

    // Mutable reference to an output vector (GArray) slot.
    template<typename T>
    std::vector<T>& outVecR(std::size_t output) {
        return outVecRef(output).wref<T>();
    }

    // Syntax sugar
    cv::GShape inShape(std::size_t input) const;
    const cv::Mat& inMat (std::size_t input) const;
    const cv::MediaFrame& inFrame(std::size_t input) const;

    cv::GRunArgP output (std::size_t idx);
    cv::Mat& outMatR(std::size_t idx);

    cv::gapi::ie::TraitAs getInputType(const std::string &layer_name) const;

    const IEUnit &uu;
    cv::gimpl::GIslandExecutable::IOutput &out;

    // Mapped media-frame views kept alive for the duration of the request.
    using Views = std::vector<std::unique_ptr<cv::MediaFrame::View>>;
    Views views;

    // Exception captured during (async) inference, rethrown to the caller.
    std::exception_ptr eptr;

    const cv::GRunArg::Meta& getMeta() { return m_meta; };

    // Keep-alive storage for preprocessed remote frames, keyed by the
    // infer-request identity (opaque pointer).
    using req_key_t = void*;
    cv::MediaFrame* prepareKeepAliveFrameSlot(req_key_t key);
    size_t releaseKeepAliveFrame(req_key_t key);
private:
    cv::detail::VectorRef& outVecRef(std::size_t idx);

    cv::GArg packArg(const cv::GArg &arg);

    cv::GRunArg::Meta m_meta;

    std::vector<cv::gimpl::GIslandExecutable::InObj> m_input_objs;
    std::vector<cv::gimpl::GIslandExecutable::OutObj> m_output_objs;

    // Data magazine: object storage bound from the in/out objects above.
    cv::gimpl::Mag m_res;

    // Output port -> writable object pointer.
    std::unordered_map<std::size_t, cv::GRunArgP> m_results;

    cv::GArgs m_args;
    cv::GShapes m_in_shapes;

    // Guards keep_alive_pp_frames (slots may be touched from worker threads).
    std::mutex keep_alive_frames_mutex;
    std::unordered_map<req_key_t, cv::MediaFrame> keep_alive_pp_frames;

    std::unordered_map<std::string, cv::gapi::ie::TraitAs> input_type;
};
|
|
|
|
|
|
// Binds all island inputs/outputs into the magazine, packs the kernel
// arguments (resolving RcDesc references to actual objects), records the
// input shapes, and wires each output port to its writable object.
IECallContext::IECallContext(const IEUnit & unit,
                             cv::gimpl::GIslandExecutable::IOutput & output,
                             const cv::GArgs & args,
                             const std::vector<cv::gimpl::RcDesc> & outs,
                             cv::GRunArg::Meta && meta,
                             std::vector<cv::gimpl::GIslandExecutable::InObj> && input_objs,
                             std::vector<cv::gimpl::GIslandExecutable::OutObj> && output_objs)
: uu(unit), out(output), m_meta(std::move(meta)),
  m_input_objs(std::move(input_objs)), m_output_objs(std::move(output_objs))
{
    for (auto& it : m_input_objs)  cv::gimpl::magazine::bindInArg (m_res, it.first, it.second);
    for (auto& it : m_output_objs) cv::gimpl::magazine::bindOutArg(m_res, it.first, it.second);

    // Resolve every GArg into the concrete object it refers to.
    m_args.reserve(args.size());
    using namespace std::placeholders;
    ade::util::transform(args,
                         std::back_inserter(m_args),
                         std::bind(&IECallContext::packArg, this, _1));

    // Remember each input's shape kind (GMAT/GFRAME/...) for dispatching.
    ade::util::transform(args, std::back_inserter(m_in_shapes),
                         [](const cv::GArg& arg) {
                             return arg.get<cv::gimpl::RcDesc>().shape;
                         });

    for (const auto out_it : ade::util::indexed(outs)) {
        // FIXME: Can the same GArg type resolution mechanism be reused here?
        const auto port = ade::util::index(out_it);
        const auto desc = ade::util::value(out_it);
        m_results[port] = cv::gimpl::magazine::getObjPtr(m_res, desc);
    }
}
|
|
|
|
|
|
// Returns the IMAGE/TENSOR trait recorded for the given input layer; throws
// std::logic_error if the layer was never classified at compile time.
cv::gapi::ie::TraitAs
IECallContext::getInputType(const std::string &layer_name) const {
    const auto type_it = uu.inputs_type.find(layer_name);
    if (type_it == uu.inputs_type.end()) {
        cv::util::throw_error(std::logic_error(
            "Failed to find input type for layer: \"" + layer_name + "\""));
    }
    return type_it->second;
}
|
|
|
|
|
|
// Returns the packed (resolved) input arguments.
const cv::GArgs& IECallContext::inArgs() const {
    return m_args;
}
|
|
|
|
|
|
// Returns the shape kind (GMAT/GFRAME/...) of the i-th input.
cv::GShape IECallContext::inShape(std::size_t i) const {
    return m_in_shapes[i];
}
|
|
|
|
|
|
// Typed accessor: the input at `input` must be a cv::Mat.
const cv::Mat& IECallContext::inMat(std::size_t input) const {
    return inArg<cv::Mat>(input);
}
|
|
|
|
|
|
// Typed accessor: the input at `input` must be a cv::MediaFrame.
const cv::MediaFrame& IECallContext::inFrame(std::size_t input) const {
    return inArg<cv::MediaFrame>(input);
}
|
|
|
|
|
|
// Writable reference to the idx-th output, which must be a cv::Mat.
cv::Mat& IECallContext::outMatR(std::size_t idx) {
    return *cv::util::get<cv::Mat*>(m_results.at(idx));
}
|
|
|
|
|
|
// Returns the writable output handle of the idx-th island output.
// (NB: also drops a stray semicolon the original had after the body.)
cv::GRunArgP IECallContext::output(std::size_t idx) {
    return m_output_objs[idx].second;
}
|
|
|
|
|
|
// Writable reference to the idx-th output, which must be a GArray slot.
cv::detail::VectorRef& IECallContext::outVecRef(std::size_t idx) {
    return cv::util::get<cv::detail::VectorRef>(m_results.at(idx));
}
|
|
|
|
|
|
// Resolves a compile-time GArg (an RcDesc object reference) into a run-time
// GArg pointing at the actual object stored in the magazine.
// Only G-type object references are accepted here.
cv::GArg IECallContext::packArg(const cv::GArg &arg) {
    // No API placeholders allowed at this point (they should have been
    // rewritten into GOBJREFs by the compiler).
    GAPI_Assert(   arg.kind != cv::detail::ArgKind::GMAT
                && arg.kind != cv::detail::ArgKind::GSCALAR
                && arg.kind != cv::detail::ArgKind::GARRAY);

    if (arg.kind != cv::detail::ArgKind::GOBJREF) {
        cv::util::throw_error(std::logic_error("Inference supports G-types ONLY!"));
    }
    GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF);

    // Look up the object in the magazine by its shape kind and id.
    const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
    switch (ref.shape)
    {
    case cv::GShape::GMAT: return cv::GArg(m_res.slot<cv::Mat>()[ref.id]);

    // NB: .at() is used for reference-semantics slots to catch missing ids.
    case cv::GShape::GARRAY: return cv::GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));

    case cv::GShape::GOPAQUE: return cv::GArg(m_res.slot<cv::detail::OpaqueRef>().at(ref.id));

    case cv::GShape::GFRAME: return cv::GArg(m_res.slot<cv::MediaFrame>().at(ref.id));

    default:
        cv::util::throw_error(std::logic_error("Unsupported GShape type"));
        break;
    }
}
|
|
|
|
|
|
// Reserves (default-constructing if absent) a keep-alive slot for the given
// infer-request key and returns its address. Thread-safe.
cv::MediaFrame* IECallContext::prepareKeepAliveFrameSlot(req_key_t key) {
    std::lock_guard<std::mutex> guard(keep_alive_frames_mutex);
    auto &slot = keep_alive_pp_frames[key];
    return &slot;
}
|
|
|
|
|
|
// Releases the preprocessed frame held for the given infer-request key by
// overwriting the slot with an empty MediaFrame (the slot itself stays in
// the map for reuse). Returns the number of reserved slots. Thread-safe.
size_t IECallContext::releaseKeepAliveFrame(req_key_t key) {
    size_t elapsed_count = 0;
    void *prev_slot = nullptr;
    {
        std::lock_guard<std::mutex> lock(keep_alive_frames_mutex);
        auto ka_frame_it = keep_alive_pp_frames.find(key);
        if (ka_frame_it != keep_alive_pp_frames.end()) {
            prev_slot = &ka_frame_it->second;
            // Reset the slot: drops the last reference to the held frame.
            ka_frame_it->second = cv::MediaFrame();
        }
        elapsed_count = keep_alive_pp_frames.size();
    }
    // prev_slot is only used in the debug log below.
    cv::util::suppress_unused_warning(prev_slot);
    GAPI_LOG_DEBUG(nullptr, "Release keep alive frame, slot: " << prev_slot <<
                            ", reserved frames count: " << elapsed_count);
    return elapsed_count;
}
|
|
|
|
|
|
// Graph metadata: the callable executed for an inference node - takes the
// call context and the pool of infer requests.
struct IECallable {
    static const char *name() { return "IERequestCallable"; }
    using Run = std::function<void(std::shared_ptr<IECallContext>, cv::gimpl::ie::RequestPool&)>;
    Run run;
};
|
|
|
|
|
|
// Kernel implementation bundle: the output-meta function plus the run body.
struct KImpl {
    cv::gimpl::CustomMetaFunction::CM customMetaFunc;
    IECallable::Run run;
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Mutable typed view over the island graph with IE-specific metadata
// (IEUnit per network node, IECallable per operation).
using GIEModel = ade::TypedGraph
    < cv::gimpl::Protocol
    , cv::gimpl::Op
    , cv::gimpl::NetworkParams
    , cv::gimpl::CustomMetaFunction
    , IEUnit
    , IECallable
    >;
|
|
|
|
|
|
|
|
|
// Read-only counterpart of GIEModel.
using GConstGIEModel = ade::ConstTypedGraph
    < cv::gimpl::Protocol
    , cv::gimpl::Op
    , cv::gimpl::NetworkParams
    , cv::gimpl::CustomMetaFunction
    , IEUnit
    , IECallable
    >;
|
|
|
|
|
|
// Runs the (oneVPL) VPP preprocessing engine on a decoded media frame, if
// the engine reports it applicable; otherwise passes the frame through
// unchanged. On success optionally stores the preprocessed frame into
// *out_keep_alive_frame (to keep it from being recycled while the request
// is in flight) and sets *out_is_preprocessed.
cv::MediaFrame preprocess_frame_impl(cv::MediaFrame &&in_frame, const std::string &layer_name,
                                     IECallContext& ctx,
                                     const cv::util::optional<cv::Rect> &opt_roi,
                                     cv::MediaFrame* out_keep_alive_frame,
                                     bool* out_is_preprocessed) {
    cv::util::optional<cv::gapi::wip::pp_params> param =
        ctx.uu.preproc_engine_impl->is_applicable(in_frame);
    if (param.has_value()) {
        GAPI_LOG_DEBUG(nullptr, "VPP preprocessing for decoded remote frame will be used");
        cv::GFrameDesc expected_net_input_descr =
            ctx.uu.net_input_params.get_param(layer_name);

        // GPU remote path with the two-inputs NV12 config: keep the frame in
        // NV12 instead of converting, so IE consumes the Y/UV planes directly.
        if(ctx.uu.params.device_id.find("GPU") != std::string::npos &&
           ctx.uu.rctx) {
            auto it = ctx.uu.params.config.find(std::string("GPU_NV12_TWO_INPUTS"));
            if (it != ctx.uu.params.config.end()) {
                if (it->second == "YES") {
                    GAPI_LOG_DEBUG(nullptr, "Adjust preprocessing GPU media format to NV12");
                    expected_net_input_descr.fmt = cv::MediaFormat::NV12;
                }
            }
        }

        cv::gapi::wip::pp_session pp_sess =
            ctx.uu.preproc_engine_impl->initialize_preproc(param.value(),
                                                           expected_net_input_descr);

        // Synchronous preprocessing; replaces the incoming frame.
        in_frame = ctx.uu.preproc_engine_impl->run_sync(pp_sess, in_frame, opt_roi);

        if (out_keep_alive_frame != nullptr) {
            GAPI_LOG_DEBUG(nullptr, "remember preprocessed remote frame to keep it busy from reuse, slot: " <<
                                    out_keep_alive_frame);
            *out_keep_alive_frame = in_frame;
        }
        if (out_is_preprocessed) {
            *out_is_preprocessed = true;
        }
    }
    // NB: std::move is required here - in_frame is a named rvalue reference.
    return std::move(in_frame);
}
|
|
|
|
|
|
// Extracts an IE blob for the i-th island input.
//  - GFRAME inputs: optionally run VPP preprocessing; on a remote (GPU)
//    context build a remote NV12 compound blob from the frame's blob params,
//    otherwise map the frame and wrap the view (the view is kept alive in
//    ctx.views for the duration of the request).
//  - GMAT inputs: wrap the cv::Mat directly (zero-copy).
// FIX: the unsupported-shape branch used GAPI_Assert("...") - asserting a
// non-null string literal is always true, so the failure never fired; it is
// now GAPI_Error as in the sibling default branches of this file.
inline IE::Blob::Ptr extractBlob(IECallContext& ctx,
                                 std::size_t i,
                                 const cv::gapi::ie::TraitAs hint,
                                 const IE::Layout &layout,
                                 const std::string& layer_name,
                                 const cv::util::optional<cv::Rect> &opt_roi,
                                 cv::MediaFrame* out_keep_alive_frame = nullptr,
                                 bool* out_is_preprocessed = nullptr) {
    switch (ctx.inShape(i)) {
        case cv::GShape::GFRAME: {
            auto frame = ctx.inFrame(i);
            if (ctx.uu.preproc_engine_impl) {
                GAPI_LOG_DEBUG(nullptr, "Try to use preprocessing for decoded frame in local ctx");
                frame = preprocess_frame_impl(std::move(frame), layer_name, ctx, opt_roi,
                                              out_keep_alive_frame, out_is_preprocessed);
            }

            // Remote-context path: build the NV12 blob directly on device.
            if (ctx.uu.rctx != nullptr) {
                cv::util::any any_blob_params = frame.blobParams();
                using ParamType = std::pair<InferenceEngine::TensorDesc, InferenceEngine::ParamMap>;
                using NV12ParamType = std::pair<ParamType, ParamType>;

                NV12ParamType* blob_params = cv::util::any_cast<NV12ParamType>(&any_blob_params);
                if (blob_params == nullptr) {
                    GAPI_Error("Incorrect type of blobParams:"
                               "expected std::pair<ParamType, ParamType>,"
                               "with ParamType std::pair<InferenceEngine::TensorDesc,"
                               "InferenceEngine::ParamMap >>");
                }

                auto y_blob = ctx.uu.rctx->CreateBlob(blob_params->first.first, blob_params->first.second);
                auto uv_blob = ctx.uu.rctx->CreateBlob(blob_params->second.first, blob_params->second.second);

#if INF_ENGINE_RELEASE > 2023000000
                cv::util::throw_error(std::logic_error(
                    "IE Backend: NV12 feature has been deprecated in OpenVINO 1.0 API."
                    " The last version which supports this is 2023.0"));
#elif INF_ENGINE_RELEASE >= 2021010000
                return IE::make_shared_blob<IE::NV12Blob>(y_blob, uv_blob);
#else
                return IE::make_shared_blob<InferenceEngine::NV12Blob>(y_blob, uv_blob);
#endif
            }

            // Local path: map the frame for reading and wrap the view.
            // The view must outlive the blob - it is parked in ctx.views.
            ctx.views.emplace_back(new cv::MediaFrame::View(frame.access(cv::MediaFrame::Access::R)));
            return wrapIE(*(ctx.views.back()), frame.desc());
        }
        case cv::GShape::GMAT: {
            return wrapIE(ctx.inMat(i), hint, layout);
        }
        default:
            GAPI_Error("Unsupported input shape for IE backend");
    }
    GAPI_Error("InternalError");
}
|
|
|
|
|
|
// Binds a blob to the given input layer of an inference request.
// Networks loaded from IR set the blob directly; imported (pre-compiled)
// networks on older IE releases must pass the cached pre-processing info
// along with the blob.
static void setBlob(InferenceEngine::InferRequest& req,
                    const std::string& layer_name,
                    const IE::Blob::Ptr& blob,
                    const IECallContext& ctx) {
    using namespace cv::gapi::ie::detail;
    const auto kind = ctx.uu.params.kind;
    if (kind != ParamDesc::Kind::Load) {
        GAPI_Assert(kind == ParamDesc::Kind::Import);
#if INF_ENGINE_RELEASE > 2023000000
        req.SetBlob(layer_name, blob);
#else
        req.SetBlob(layer_name, blob, ctx.uu.preproc_map.at(layer_name));
#endif
    } else {
        req.SetBlob(layer_name, blob);
    }
}
|
|
|
|
|
|
// Binds an ROI view of a blob to the given input layer.
// On GPU with a remote context the ROI-blob creation may fail for
// device-specific reasons, so that path is wrapped to emit a diagnostic
// pointing the user at G-API's own preprocessing before rethrowing.
static void setROIBlob(InferenceEngine::InferRequest& req,
                       const std::string& layer_name,
                       const IE::Blob::Ptr& blob,
                       const cv::Rect &roi,
                       const IECallContext& ctx) {
    const bool gpu_remote_path =
        ctx.uu.params.device_id.find("GPU") != std::string::npos && ctx.uu.rctx;

    if (!gpu_remote_path) {
        setBlob(req, layer_name, IE::make_shared_blob(blob, toIE(roi)), ctx);
        return;
    }

    try {
        setBlob(req, layer_name, IE::make_shared_blob(blob, toIE(roi)), ctx);
    } catch (const std::exception &ex) {
        GAPI_LOG_WARNING(nullptr, "cannot set ROI blob for layer: " << layer_name <<
                                  ", reason:\n" << ex.what() <<
                                  "\nTry using self GAPI preprocessing feature: "
                                  " Check method `cfgPreprocessingParams` in `cv::gapi::ie::Params`");
        throw;
    }
}
|
|
|
}
|
|
|
|
|
|
// Creates params.nireq inference requests for the compiled network and
// pre-binds every constant input blob once per request, so per-frame code
// only has to set the dynamic inputs.
std::vector<InferenceEngine::InferRequest> cv::gimpl::ie::IECompiled::createInferRequests() {
    std::vector<InferenceEngine::InferRequest> requests;
    requests.reserve(params.nireq);

    for (size_t i = 0u; i < params.nireq; ++i) {
        auto request = this_network.CreateInferRequest();
        // Constant inputs never change - bind them right away.
        for (auto &&in : params.const_inputs) {
            request.SetBlob(in.first, wrapIE(in.second.first, in.second.second));
        }
        requests.push_back(std::move(request));
    }

    return requests;
}
|
|
|
|
|
|
class IInferExecutor {
|
|
|
public:
|
|
|
using Ptr = std::shared_ptr<IInferExecutor>;
|
|
|
using NotifyCallbackF = std::function<void()>;
|
|
|
using SetInputDataF = std::function<void(InferenceEngine::InferRequest&)>;
|
|
|
using ReadOutputDataF = std::function<void(InferenceEngine::InferRequest&, InferenceEngine::StatusCode)>;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
struct Task {
|
|
|
SetInputDataF set_input_data;
|
|
|
ReadOutputDataF read_output_data;
|
|
|
};
|
|
|
|
|
|
IInferExecutor(IE::InferRequest request, NotifyCallbackF notify)
|
|
|
: m_request(std::move(request)),
|
|
|
m_notify(std::move(notify)) {
|
|
|
};
|
|
|
|
|
|
virtual void execute(const Task& task) = 0;
|
|
|
virtual ~IInferExecutor() = default;
|
|
|
|
|
|
protected:
|
|
|
IE::InferRequest m_request;
|
|
|
NotifyCallbackF m_notify;
|
|
|
};
|
|
|
|
|
|
// Runs a Task on the calling thread via blocking IE::InferRequest::Infer().
class SyncInferExecutor : public IInferExecutor {
    using IInferExecutor::IInferExecutor;
    virtual void execute(const IInferExecutor::Task& task) override;
};
|
|
|
|
|
|
// Feeds the request, runs blocking inference, and consumes the outputs.
// m_notify must fire exactly once on every path (success or exception)
// so the pool gets its request slot back - hence the catch-notify-rethrow.
void SyncInferExecutor::execute(const IInferExecutor::Task& task) {
    try {
        task.set_input_data(m_request);
        m_request.Infer();
        // Synchronous Infer() throws on failure, so reaching this line
        // means the request completed OK.
        task.read_output_data(m_request, IE::StatusCode::OK);
    } catch (...) {
        m_notify();   // release the slot before propagating the error
        throw;
    }
    m_notify();       // success path: release the slot
}
|
|
|
|
|
|
// Runs a Task via IE::InferRequest::StartAsync(); outputs are consumed in
// the IE completion callback (callback()), which also notifies the pool.
class AsyncInferExecutor : public IInferExecutor {
public:
    using IInferExecutor::IInferExecutor;
    virtual void execute(const IInferExecutor::Task& task) override;

private:
    // Invoked by IE when the async request finishes; noexcept because it
    // runs on an IE-owned thread where an escaping exception would terminate.
    void callback(Task task,
                  IE::InferRequest request,
                  IE::StatusCode code) noexcept;
};
|
|
|
|
|
|
// Installs the completion callback (capturing the task by value), feeds the
// inputs, and kicks off the async inference. If setup fails before the
// request is actually started, the callback is detached again and the pool
// is notified here, since IE will never invoke it.
void AsyncInferExecutor::execute(const IInferExecutor::Task& task) {
    using namespace std::placeholders;
    using callback_t = std::function<void(IE::InferRequest, IE::StatusCode)>;
    m_request.SetCompletionCallback(
            static_cast<callback_t>(
                std::bind(&AsyncInferExecutor::callback, this, task, _1, _2)));
    try {
        task.set_input_data(m_request);
        m_request.StartAsync();
    } catch (...) {
        // Reset the callback so a stale task can't fire later on this request.
        m_request.SetCompletionCallback([](){});
        m_notify();
        throw;
    }
}
|
|
|
|
|
|
// Completion handler: consume outputs (the task decides how to report a
// non-OK status), detach the callback, and only then notify the pool -
// once notified, the request may immediately be reused by another task.
void AsyncInferExecutor::callback(IInferExecutor::Task task,
                                  IE::InferRequest request,
                                  IE::StatusCode code) noexcept {
    task.read_output_data(request, code);
    request.SetCompletionCallback([](){});
    m_notify();
}
|
|
|
|
|
|
// A fixed-size pool of infer-request executors.
// Idle executors are tracked by index in a bounded concurrent queue;
// getIdleRequest() blocks until one is available, and each executor's
// notify callback (release()) pushes its index back when done.
class cv::gimpl::ie::RequestPool {
public:
    explicit RequestPool(cv::gapi::ie::InferMode mode,
                         std::vector<InferenceEngine::InferRequest>&& requests);

    // Blocks until some request is idle, then hands out its executor.
    IInferExecutor::Ptr getIdleRequest();
    // Blocks until every in-flight request has completed.
    void waitAll();

private:
    // Marks all requests idle (fills m_idle_ids).
    void setup();
    // Notify hook bound into each executor: returns id to the idle queue.
    void release(const size_t id);

    QueueClass<size_t>               m_idle_ids;
    std::vector<IInferExecutor::Ptr> m_requests;
};
|
|
|
|
|
|
// Returns the request with the given id to the idle set; unblocks one
// waiter in getIdleRequest()/waitAll() if any.
void cv::gimpl::ie::RequestPool::release(const size_t id) {
    m_idle_ids.push(id);
}
|
|
|
|
|
|
|
|
|
// Wraps every raw InferRequest into a sync or async executor (per the
// requested mode) whose notify callback returns its slot to this pool,
// then marks all slots idle.
cv::gimpl::ie::RequestPool::RequestPool(cv::gapi::ie::InferMode mode,
                                        std::vector<InferenceEngine::InferRequest>&& requests) {
    m_requests.reserve(requests.size());
    for (size_t i = 0; i < requests.size(); ++i) {
        auto on_idle = std::bind(&RequestPool::release, this, i);
        IInferExecutor::Ptr executor;
        if (mode == cv::gapi::ie::InferMode::Async) {
            executor = std::make_shared<AsyncInferExecutor>(std::move(requests[i]),
                                                            std::move(on_idle));
        } else if (mode == cv::gapi::ie::InferMode::Sync) {
            executor = std::make_shared<SyncInferExecutor>(std::move(requests[i]),
                                                           std::move(on_idle));
        } else {
            GAPI_Error("Unsupported cv::gapi::ie::InferMode");
        }
        m_requests.emplace_back(std::move(executor));
    }
    setup();
}
|
|
|
|
|
|
void cv::gimpl::ie::RequestPool::setup() {
|
|
|
for (size_t i = 0; i < m_requests.size(); ++i) {
|
|
|
m_idle_ids.push(i);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// Hands out an idle executor; blocks until one is released if all are busy.
IInferExecutor::Ptr cv::gimpl::ie::RequestPool::getIdleRequest() {
    size_t idle_id = 0u;
    m_idle_ids.pop(idle_id);   // blocking pop
    return m_requests[idle_id];
}
|
|
|
|
|
|
|
|
|
void cv::gimpl::ie::RequestPool::waitAll() {
|
|
|
|
|
|
for (size_t i = 0; i < m_requests.size(); ++i) {
|
|
|
size_t id = 0u;
|
|
|
m_idle_ids.pop(id);
|
|
|
}
|
|
|
setup();
|
|
|
}
|
|
|
|
|
|
|
|
|
// Builds the island executable: finds the single OP node in the island,
// compiles its IEUnit into a network, and creates the request pool.
// Data nodes are only validated and recorded; exactly one OP node is
// allowed per island (multi-node inference is rejected).
cv::gimpl::ie::GIEExecutable::GIEExecutable(const ade::Graph &g,
                                            const std::vector<ade::NodeHandle> &nodes)
    : m_g(g), m_gm(m_g) {
    GConstGIEModel iem(g);

    for (auto &nh : nodes) {
        switch (m_gm.metadata(nh).get<NodeType>().t) {
        case NodeType::OP:
            // First (and only) operation node: compile the network and
            // allocate the pool of infer requests for it.
            if (this_nh == nullptr) {
                this_nh = nh;
                this_iec = iem.metadata(this_nh).get<IEUnit>().compile();
                m_reqPool.reset(new RequestPool(this_iec.params.mode, this_iec.createInferRequests()));
            }
            else
                util::throw_error(std::logic_error("Multi-node inference is not supported!"));
            break;

        case NodeType::DATA: {
            m_dataNodes.push_back(nh);
            const auto &desc = m_gm.metadata(nh).get<Data>();
            // Island data must be purely transitional: no constants and no
            // internal buffers are expected at this point of compilation.
            if (desc.storage == Data::Storage::CONST_VAL) {
                util::throw_error(std::logic_error("No const data please!"));
            }
            if (desc.storage == Data::Storage::INTERNAL) {
                util::throw_error(std::logic_error("No internal data please!"));
            }
            break;
        }
        default: util::throw_error(std::logic_error("Unsupported NodeType type"));
        }
    }
}
|
|
|
|
|
|
// Executes one island iteration: collects inputs/outputs into a call
// context and dispatches the kernel onto the request pool.
// On end-of-stream, drains all in-flight requests and forwards EOS.
// On a kernel exception, posts the exception to every output so the
// pipeline does not dead-lock waiting for results.
void cv::gimpl::ie::GIEExecutable::run(cv::gimpl::GIslandExecutable::IInput  &in,
                                       cv::gimpl::GIslandExecutable::IOutput &out) {
    std::vector<InObj>  input_objs;
    std::vector<OutObj> output_objs;

    const auto &in_desc = in.desc();
          auto  in_msg  = in.get();

    if (cv::util::holds_alternative<cv::gimpl::EndOfStream>(in_msg))
    {
        // Let all pending async inferences finish before propagating EOS.
        m_reqPool->waitAll();
        out.post(cv::gimpl::EndOfStream{});
        return;
    }

    GAPI_Assert(cv::util::holds_alternative<cv::GRunArgs>(in_msg));
    const auto in_vector = cv::util::get<cv::GRunArgs>(in_msg);

    // Merge the metadata of all inputs into a single stub which is attached
    // to every output later on.
    cv::GRunArg::Meta stub_meta;
    for (auto &&in_arg : in_vector)
    {
        stub_meta.insert(in_arg.meta.begin(), in_arg.meta.end());
    }

    // Pair each input descriptor with its runtime argument.
    input_objs.reserve(in_desc.size());
    for (auto &&it: ade::util::zip(ade::util::toRange(in_desc),
                                   ade::util::toRange(in_vector)))
    {
        input_objs.emplace_back(std::get<0>(it), std::get<1>(it));
    }

    // Pair each output descriptor with its output buffer from the island.
    const auto &out_desc = out.desc();
    output_objs.reserve(out_desc.size());
    for (auto &&it: ade::util::indexed(ade::util::toRange(out_desc)))
    {
        output_objs.emplace_back(ade::util::value(it),
                                 out.get(ade::util::checked_cast<int>(ade::util::index(it))));
    }

    GConstGIEModel giem(m_g);
    const auto &uu = giem.metadata(this_nh).get<IEUnit>();
    const auto &op = m_gm.metadata(this_nh).get<Op>();

    // The context owns inputs/outputs/meta for the (possibly asynchronous)
    // lifetime of this call - hence the moves.
    auto ctx = std::make_shared<IECallContext>(uu, out, op.args, op.outs,
            std::move(stub_meta), std::move(input_objs), std::move(output_objs));

    const auto &kk = giem.metadata(this_nh).get<IECallable>();

    try {
        kk.run(ctx, *m_reqPool);
    } catch (...) {
        // Report the failure on every declared output so downstream
        // consumers observe the exception instead of hanging.
        auto eptr = std::current_exception();
        for (auto i : ade::util::iota(ctx->uu.params.num_out))
        {
            auto output = ctx->output(i);
            ctx->out.meta(output, ctx->getMeta());
            ctx->out.post(std::move(output), eptr);
        }
        return;
    }

    // In non-streaming (single-shot) mode the call must be synchronous:
    // wait for all requests before returning to the caller.
    if (!m_gm.metadata().contains<Streaming>()) {
        m_reqPool->waitAll();
    }
}
|
|
|
|
|
|
namespace cv {
|
|
|
namespace gimpl {
|
|
|
namespace ie {
|
|
|
// Adds a (layer_name -> dims) entry to input_reshape_table, taking H and W
// from the incoming image meta and keeping the layer's remaining dims.
//
// @param ii                  network input the reshape applies to.
// @param mm                  input meta (GMatDesc or GFrameDesc) providing the image size.
// @param input_reshape_table accumulated reshape requests; extended in place.
// @throws if the meta is neither GMat nor GFrame, or the layer has < 2 dims.
//
// NOTE: `mm` is now taken by const reference - the previous by-value
// parameter copied the whole GMetaArg variant on every call for no benefit
// (clang-tidy: performance-unnecessary-value-param).
static void configureInputReshapeByImage(const IE::InputInfo::Ptr& ii,
                                         const cv::GMetaArg &mm,
                                         IE::ICNNNetwork::InputShapes& input_reshape_table) {
    const auto& layer_name = ii->name();
    // A layer cannot be reshaped both by explicit dims and by image.
    const auto name_pos_in_table = input_reshape_table.find(layer_name);
    if (name_pos_in_table != input_reshape_table.end()) {
        GAPI_Assert(false &&
            "Names of layers for reshape with specified dimensions shouldn't intersect with names for reshape by image");
    }
    cv::Size image_sz;
    switch (mm.index()) {
        case cv::GMetaArg::index_of<cv::GMatDesc>():
            {
                const auto &meta = util::get<cv::GMatDesc>(mm);
                image_sz = meta.size;
                break;
            }
        case cv::GMetaArg::index_of<cv::GFrameDesc>():
            {
                const auto &meta = util::get<cv::GFrameDesc>(mm);
                image_sz = meta.size;
                break;
            }
        default:
            util::throw_error(std::runtime_error("Unsupported input meta for IE backend"));
    }
    auto input_dims = ii->getTensorDesc().getDims();
    const auto size = input_dims.size();
    if (size <= 1) {
        GAPI_Error("Unsupported number of dimensions for reshape by image");
    }
    // The two innermost dimensions are assumed to be H and W
    // (NCHW/NHWC-like layouts).
    input_dims.at(size - 2) = static_cast<size_t>(image_sz.height);
    input_dims.at(size - 1) = static_cast<size_t>(image_sz.width);

    input_reshape_table.emplace(layer_name, input_dims);
}
|
|
|
|
|
|
// Sets the IE input precision for the layer from the input meta:
// GMat inputs keep their cv depth; GFrame inputs are always 8-bit images.
// @throws for any other meta kind.
//
// NOTE: `mm` is now taken by const reference - the previous by-value
// parameter copied the GMetaArg variant on every call
// (clang-tidy: performance-unnecessary-value-param).
static void cfgInputPrecision(const IE::InputInfo::Ptr& ii, const cv::GMetaArg &mm) {
    switch (mm.index()) {
        case cv::GMetaArg::index_of<cv::GMatDesc>(): {
            const auto &desc = util::get<cv::GMatDesc>(mm);
            ii->setPrecision(toIE(desc.depth));
            break;
        }
        case cv::GMetaArg::index_of<cv::GFrameDesc>():
            ii->setPrecision(toIE(CV_8U));
            break;
        default:
            util::throw_error(std::runtime_error("Unsupported input meta for IE backend"));
    }
}
|
|
|
|
|
|
// Configures IE preprocessing for an image input: validates the meta kind,
// sets the resize algorithm, and enables NV12 color conversion for NV12
// media frames (rejected on OpenVINO releases past 2023.0 where the NV12
// API was removed).
static void cfgImagePreprocessing(const IE::InputInfo::Ptr &ii,
                                  const cv::GMetaArg &mm,
                                  const IE::ResizeAlgorithm interp) {
    const bool is_mat   = cv::util::holds_alternative<cv::GMatDesc>(mm);
    const bool is_frame = cv::util::holds_alternative<cv::GFrameDesc>(mm);
    if (!is_mat && !is_frame) {
        util::throw_error(std::runtime_error("Unsupported input meta for IE backend"));
    }

    ii->getPreProcess().setResizeAlgorithm(interp);
    if (is_frame) {
        const auto &frame_desc = util::get<cv::GFrameDesc>(mm);
        if (frame_desc.fmt == cv::MediaFormat::NV12) {
#if INF_ENGINE_RELEASE > 2023000000
            cv::util::throw_error(std::logic_error(
                "IE Backend: cv::MediaFrame with NV12 format is no longer supported"
                " because NV12 feature has been deprecated in OpenVINO 1.0 API."
                " The last version which supports this is 2023.0"));
#else
            ii->getPreProcess().setColorFormat(IE::ColorFormat::NV12);
#endif
        }
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
// Configures precision, layout, and resize preprocessing for one input
// layer depending on whether it is treated as an image or a tensor.
// Image inputs get bilinear resize by default and must not carry an
// explicit layout; tensor inputs apply the user's layout/resize verbatim.
static void cfgInputPreprocessing(const cv::gapi::ie::TraitAs trait,
                                  const IE::InputInfo::Ptr &ii,
                                  const cv::GMetaArg &mm,
                                  const std::string &layer_name,
                                  const AttrMap<std::string> &layout_map,
                                  const AttrMap<int> &interp_map) {
    cfgInputPrecision(ii, mm);
    const auto explicit_input_layout = lookUp(layout_map, layer_name);
    const auto explicit_resize       = lookUp(interp_map, layer_name);

    if (trait != cv::gapi::ie::TraitAs::IMAGE) {
        // Tensor path: honor explicit user settings as-is.
        GAPI_LOG_DEBUG(NULL, "IE Backend: Input: \"" <<
                layer_name << "\" " << mm << " is tensor.");
        if (explicit_input_layout) {
            GAPI_LOG_DEBUG(NULL, "IE Backend: Set input layout \"" <<
                    *explicit_input_layout << "\" for layer \"" << layer_name << "\"");
            ii->setLayout(toIE(*explicit_input_layout));
        }
        if (explicit_resize) {
            GAPI_LOG_DEBUG(NULL, "IE Backend: Set resize for layer \"" << layer_name << "\"");
            ii->getPreProcess().setResizeAlgorithm(toIEInterp(*explicit_resize));
        }
        return;
    }

    // Image path: layout is implied by the image, so forbidding an
    // explicit one catches contradictory user configuration early.
    GAPI_LOG_DEBUG(NULL, "IE Backend: Input: \"" <<
            layer_name << " " << mm << "\" is image.");
    if (explicit_input_layout) {
        util::throw_error(std::logic_error("Input data provided for layer: \"" +
                    layer_name + "\" is recognized as \"image\". Explicitly" +
                    " specified layout is prohibited."));
    }
    cfgImagePreprocessing(ii, mm,
                          explicit_resize ? toIEInterp(*explicit_resize)
                                          : IE::RESIZE_BILINEAR);
}
|
|
|
|
|
|
// Builds a PreProcessInfo for an image input of an imported network:
// validates the meta kind, sets the resize algorithm, and enables NV12
// color conversion for NV12 media frames (rejected past OpenVINO 2023.0).
static IE::PreProcessInfo createImagePreProcInfo(const cv::GMetaArg &mm,
                                                 const IE::ResizeAlgorithm interp) {
    const bool is_frame = cv::util::holds_alternative<cv::GFrameDesc>(mm);
    if (!is_frame && !cv::util::holds_alternative<cv::GMatDesc>(mm)) {
        util::throw_error(std::runtime_error("Unsupported input meta for IE backend"));
    }

    IE::PreProcessInfo info;
    info.setResizeAlgorithm(interp);
    if (is_frame && util::get<cv::GFrameDesc>(mm).fmt == cv::MediaFormat::NV12) {
#if INF_ENGINE_RELEASE > 2023000000
        cv::util::throw_error(std::logic_error(
            "IE Backend: cv::MediaFrame with NV12 format is no longer supported"
            " because NV12 feature has been deprecated in OpenVINO 1.0 API."
            " The last version which supports this is 2023.0"));
#else
        info.setColorFormat(IE::ColorFormat::NV12);
#endif
    }
    return info;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Builds a PreProcessInfo for an imported-network input. Image inputs
// default to bilinear resize (overridable); tensor inputs get a resize
// algorithm only when explicitly requested.
static IE::PreProcessInfo createPreProcInfo(const cv::gapi::ie::TraitAs trait,
                                            const cv::GMetaArg& mm,
                                            const cv::optional<int> explicit_resize) {
    if (trait != cv::gapi::ie::TraitAs::IMAGE) {
        IE::PreProcessInfo info;
        if (explicit_resize) {
            info.setResizeAlgorithm(toIEInterp(*explicit_resize));
        }
        return info;
    }
    return createImagePreProcInfo(mm,
                                  explicit_resize ? toIEInterp(*explicit_resize)
                                                  : IE::RESIZE_BILINEAR);
}
|
|
|
|
|
|
using namespace cv::gapi::ie::detail;
|
|
|
// Applies the user-requested output precision(s) to the network outputs:
// a single precision is broadcast to all outputs, a per-layer map is
// applied selectively, and monostate means "keep network defaults".
//
// FIX: the range-for loops iterated by value (`for (auto it : ...)`),
// copying a pair<string, DataPtr> (string + shared_ptr refcount bump)
// per output; iterate by const reference instead
// (clang-tidy: performance-for-range-copy).
static void configureOutputPrecision(const IE::OutputsDataMap           &outputs_info,
                                     const ParamDesc::PrecisionVariantT &output_precision) {
    cv::util::visit(cv::util::overload_lambdas(
            [&outputs_info](ParamDesc::PrecisionT cvdepth) {
                const auto precision = toIE(cvdepth);
                for (const auto &it : outputs_info) {
                    it.second->setPrecision(precision);
                }
            },
            [&outputs_info](const ParamDesc::PrecisionMapT& precision_map) {
                for (const auto &it : precision_map) {
                    outputs_info.at(it.first)->setPrecision(toIE(it.second));
                }
            },
            [&outputs_info](cv::util::monostate) {
                // No explicit precision was requested - keep defaults.
            }
            ), output_precision
    );
}
|
|
|
|
|
|
// Applies explicitly requested layouts to the named network outputs.
//
// FIX: `for (const auto it : ...)` copied the map's pair<string, string>
// each iteration despite the const; bind by const reference
// (clang-tidy: performance-for-range-copy).
static void configureOutputLayout(const IE::OutputsDataMap    &outputs_info,
                                  const AttrMap<std::string>  &output_layout) {
    for (const auto &it : output_layout) {
        outputs_info.at(it.first)->setLayout(toIE(it.second));
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
// Completion handler for single-output-per-layer inference (Infer/InferROI):
// copies every output blob into its cv::Mat and posts it downstream.
// A non-OK status is converted into an exception_ptr which is delivered
// with each output instead of a value.
static void PostOutputs(InferenceEngine::InferRequest   &request,
                        InferenceEngine::StatusCode      code,
                        std::shared_ptr<IECallContext>   ctx) {
    GAPI_ITT_STATIC_LOCAL_HANDLE(ie_cb_post_outputs_hndl, "IE_async_callback_PostOutputs");
    GAPI_ITT_AUTO_TRACE_GUARD(ie_cb_post_outputs_hndl);

    if (code != IE::StatusCode::OK) {
        std::stringstream ss;
        ss << "InferRequest for model: " << ctx->uu.params.model_path
           << " finished with InferenceEngine::StatusCode: " << static_cast<int>(code);
        ctx->eptr = std::make_exception_ptr(std::logic_error(ss.str()));
    }

    // Outputs are posted even on failure - ctx->eptr carries the error
    // so consumers are unblocked and observe the exception.
    for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
        auto& out_mat = ctx->outMatR(i);
        IE::Blob::Ptr this_blob = request.GetBlob(ctx->uu.params.output_names[i]);
        copyFromIE(this_blob, out_mat);
        auto output = ctx->output(i);
        ctx->out.meta(output, ctx->getMeta());
        ctx->out.post(std::move(output), ctx->eptr);
    }

    // Release the zero-copy frame views and any keep-alive frame now that
    // the request no longer references the input memory.
    ctx->views.clear();
    ctx->releaseKeepAliveFrame(&request);
}
|
|
|
|
|
|
// Completion handler for list-style inference (one request per ROI/element).
// Copyable by design: IE callbacks take it by value, so the shared state
// (result counters, context) lives in a shared Priv instance.
class PostOutputsList {
public:
    PostOutputsList(size_t size,
                    std::shared_ptr<IECallContext> ctx,
                    std::vector<std::vector<int>>&& cached_dims);

    // Invoked once per finished element; `pos` is the element's index in
    // the output vectors.
    void operator()(InferenceEngine::InferRequest &request,
                    InferenceEngine::StatusCode    code,
                    size_t                         pos) const;

private:
    struct Priv {
        size_t size;                        // total number of elements expected
        std::atomic<size_t> finished{0u};   // how many callbacks have completed
        std::shared_ptr<IECallContext> ctx;
        std::vector<std::vector<int>> cached_dims;  // per-output cv::Mat dims
    };
    std::shared_ptr<Priv> m_priv;
};
|
|
|
|
|
|
// Initializes the shared state: expected element count, call context, and
// the pre-computed output dimensions each result Mat must be created with.
PostOutputsList::PostOutputsList(size_t size,
                                 std::shared_ptr<IECallContext> ctx,
                                 std::vector<std::vector<int>>&& cached_dims)
    : m_priv(std::make_shared<Priv>()) {
    m_priv->size        = size;
    m_priv->ctx         = std::move(ctx);
    m_priv->cached_dims = std::move(cached_dims);
}
|
|
|
|
|
|
// Per-element completion: copies this element's output blobs into slot
// `pos` of the output vectors, and when the last element finishes, posts
// all outputs (or the recorded exception) downstream exactly once.
void PostOutputsList::operator()(InferenceEngine::InferRequest &req,
                                 InferenceEngine::StatusCode    code,
                                 size_t                         pos) const {
    auto&& ctx         = m_priv->ctx;
    auto&& cached_dims = m_priv->cached_dims;
    auto&& finished    = m_priv->finished;
    auto&& size        = m_priv->size;

    if (code != IE::StatusCode::OK) {
        ctx->eptr = std::make_exception_ptr(
                std::logic_error("IE::InferRequest finished with not OK status"));
    }

    // Once any element failed, skip copying further results - the error
    // will be delivered for the whole list below.
    if (!ctx->eptr) {
        for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
            std::vector<cv::Mat> &out_vec = ctx->outVecR<cv::Mat>(i);

            IE::Blob::Ptr out_blob = req.GetBlob(ctx->uu.params.output_names[i]);
            GAPI_Assert(out_blob);

            // FIXME: Avoid data copy. Not sure if it is possible though
            out_vec[pos].create(cached_dims[i], toCV(out_blob->getTensorDesc().getPrecision()));
            copyFromIE(out_blob, out_vec[pos]);
        }
    }
    // Atomic increment: callbacks may run concurrently on IE threads.
    ++finished;

    // Last finisher posts the aggregated results (or the exception).
    if (finished == size) {
        for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
            auto output = ctx->output(i);
            ctx->out.meta(output, ctx->getMeta());
            ctx->out.post(std::move(output), ctx->eptr);
        }
    }
}
|
|
|
|
|
|
// Kernel implementing plain cv::gapi::infer<>: one inference per island
// iteration, all network inputs fed from graph inputs.
struct Infer: public cv::detail::KernelTag {
    using API = cv::GInferBase;
    static cv::gapi::GBackend backend()  { return cv::gapi::ie::backend(); }
    static KImpl kernel()                { return KImpl{outMeta, run}; }

    // Shape inference + network configuration. Runs at compile time:
    // configures input precision/layout/resize (Load) or builds the
    // pre-processing map (Import), applies reshape-by-image requests, and
    // derives one GMatDesc per network output.
    // NOTE: mutates the IEUnit via const_cast - outMeta is the only place
    // where metas are known, so cached state is filled in here.
    static cv::GMetaArgs outMeta(const ade::Graph      &gr,
                                 const ade::NodeHandle &nh,
                                 const cv::GMetaArgs   &in_metas,
                                 const cv::GArgs       &) {
        cv::GMetaArgs result;

        GConstGIEModel gm(gr);
        const auto &uu = gm.metadata(nh).get<IEUnit>();
        IE::ICNNNetwork::InputShapes input_reshape_table = uu.params.reshape_table;

        // Every network input is fed from the graph - counts must match.
        GAPI_Assert(uu.params.input_names.size() == in_metas.size()
                    && "Known input layers count doesn't match input meta count");

        const auto input_layout = broadcastLayerAttr(uu.params.input_layout,
                                                     uu.params.input_names);
        const auto interpolation = broadcastLayerAttr(uu.params.interpolation,
                                                      uu.params.input_names);

        using namespace cv::gapi::ie::detail;
        if (uu.params.kind == ParamDesc::Kind::Load) {
            auto inputs = uu.net.getInputsInfo();
            for (auto &&it : ade::util::zip(ade::util::toRange(uu.params.input_names),
                                            ade::util::toRange(in_metas))) {
                const auto &input_name = std::get<0>(it);
                auto ii = inputs.at(input_name);
                const auto &mm = std::get<1>(it);

                if (uu.params.layer_names_to_reshape.find(input_name) !=
                    uu.params.layer_names_to_reshape.end()) {
                    configureInputReshapeByImage(ii, mm, input_reshape_table);
                }
                const auto trait = clarifyTrait(mm, ii->getTensorDesc().getDims());
                // Cache the image/tensor trait for run() to pick the right
                // blob-wrapping strategy.
                const_cast<IEUnit&>(uu).inputs_type.emplace(input_name, trait);
                cfgInputPreprocessing(trait, ii, mm, input_name,
                                      input_layout, interpolation);

                if (uu.net_input_params.is_applicable(mm)) {
                    const_cast<IEUnit::InputFramesDesc &>(uu.net_input_params)
                        .set_param(input_name, ii->getTensorDesc());
                }
            }
            for (auto &&p : uu.params.const_inputs) {
                const auto ii = inputs.at(p.first);
                ii->setPrecision(toIE(p.second.first.depth()));
            }

            // Apply all accumulated reshape requests in one go.
            if (!input_reshape_table.empty()) {
                const_cast<IE::CNNNetwork *>(&uu.net)->reshape(input_reshape_table);
            }

            const auto output_layout = broadcastLayerAttr(uu.params.output_layout,
                                                          uu.params.output_names);
            configureOutputLayout(uu.net.getOutputsInfo(), output_layout);
            configureOutputPrecision(uu.net.getOutputsInfo(), uu.params.output_precision);
        } else {
            // Imported networks cannot be reconfigured; only the
            // pre-processing map and cached traits are populated.
            GAPI_Assert(uu.params.kind == ParamDesc::Kind::Import);
            auto inputs = uu.this_network.GetInputsInfo();
            auto* non_const_prepm = const_cast<IEUnit::PreProcMap*>(&uu.preproc_map);
            for (auto &&it : ade::util::zip(ade::util::toRange(uu.params.input_names),
                                            ade::util::toRange(in_metas))) {
                const auto &input_name = std::get<0>(it);
                auto ii = inputs.at(input_name);
                const auto & mm = std::get<1>(it);
                const auto trait = clarifyTrait(mm, ii->getTensorDesc().getDims());
                const_cast<IEUnit&>(uu).inputs_type.emplace(input_name, trait);
                const auto explicit_resize = lookUp(interpolation, input_name);
                non_const_prepm->emplace(
                        input_name, createPreProcInfo(trait, mm, explicit_resize));

                if (uu.net_input_params.is_applicable(mm)) {
                    const_cast<IEUnit::InputFramesDesc &>(uu.net_input_params)
                        .set_param(input_name, ii->getTensorDesc());
                }
            }
        }

        // One GMatDesc per output, derived from the (possibly reshaped)
        // network output tensor descriptors.
        for (const auto &out_name : uu.params.output_names) {
            const auto& desc =
                uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load
                ? uu.net.getOutputsInfo().at(out_name)->getTensorDesc()
                : uu.this_network.GetOutputsInfo().at(out_name)->getTensorDesc();

            cv::GMatDesc outm(toCV(desc.getPrecision()),
                              toCVDims(toCV(desc.getDims()), desc.getLayout()));
            result.emplace_back(outm);
        }
        return result;
    }

    // Runtime: grab an idle request, bind every input blob, and let the
    // executor run it; PostOutputs copies results out on completion.
    static void run(std::shared_ptr<IECallContext>  ctx,
                    cv::gimpl::ie::RequestPool     &reqPool) {
        using namespace std::placeholders;
        reqPool.getIdleRequest()->execute(
                IInferExecutor::Task {
                    [ctx](InferenceEngine::InferRequest &req) {
                        // Note: ctx keeps the input data alive for the
                        // whole (possibly async) inference.
                        for (auto i : ade::util::iota(ctx->uu.params.num_in)) {
                            const auto& layer_name = ctx->uu.params.input_names[i];
                            const auto hint = ctx->getInputType(layer_name);
                            const auto layout = req.GetBlob(layer_name)->getTensorDesc().getLayout();
                            IE::Blob::Ptr this_blob = extractBlob(*ctx, i, hint,
                                                                  layout, layer_name,
                                                                  cv::util::optional<cv::Rect>{});
                            setBlob(req, layer_name, this_blob, *ctx);
                        }
                    },
                    std::bind(PostOutputs, _1, _2, ctx)
                }
        );
    }
};
|
|
|
|
|
|
// Kernel implementing cv::gapi::infer<> with an ROI: takes a cv::Rect plus
// one image input and runs inference on the cropped region.
struct InferROI: public cv::detail::KernelTag {
    using API = cv::GInferROIBase;
    static cv::gapi::GBackend backend()  { return cv::gapi::ie::backend(); }
    static KImpl kernel()                { return KImpl{outMeta, run}; }

    // Shape inference + network configuration, analogous to Infer::outMeta
    // but specialized for exactly one image input (meta index 1; index 0
    // is the ROI rect). The input must classify as IMAGE.
    static cv::GMetaArgs outMeta(const ade::Graph      &gr,
                                 const ade::NodeHandle &nh,
                                 const cv::GMetaArgs   &in_metas,
                                 const cv::GArgs       &) {
        cv::GMetaArgs result;

        GConstGIEModel gm(gr);
        const auto &uu = gm.metadata(nh).get<IEUnit>();
        IE::ICNNNetwork::InputShapes input_reshape_table = uu.params.reshape_table;

        // InferROI has exactly one network input and two graph inputs
        // (the ROI and the image).
        GAPI_Assert(1u == uu.params.input_names.size());
        GAPI_Assert(2u == in_metas.size());

        const auto &input_name = uu.params.input_names.at(0);
        auto &&mm = in_metas.at(1u);
        const auto &tensor_desc =
            (uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load)
            ? uu.net.getInputsInfo().at(input_name)->getTensorDesc()
            : uu.this_network.GetInputsInfo().at(input_name)->getTensorDesc();

        // The data input must look like an image - ROI crops make no
        // sense on generic tensors.
        if (cv::util::holds_alternative<cv::GMatDesc>(mm) ||
            cv::util::holds_alternative<cv::GFrameDesc>(mm)) {
            const auto trait = clarifyTrait(mm, tensor_desc.getDims());
            if (trait != cv::gapi::ie::TraitAs::IMAGE) {
                util::throw_error(std::runtime_error(
                            "IE Backend: Only image is supported"
                            " as the 1th argument for InferROI"));
            }
        } else {
            util::throw_error(std::runtime_error(
                        "IE Backend: Unsupported input meta for"
                        " 1th argument for InferROI"));
        }

        const auto input_layout = broadcastLayerAttr(uu.params.input_layout,
                                                     uu.params.input_names);
        const auto interpolation = broadcastLayerAttr(uu.params.interpolation,
                                                      uu.params.input_names);
        const auto trait = cv::gapi::ie::TraitAs::IMAGE;
        if (uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
            auto inputs = uu.net.getInputsInfo();
            auto ii = inputs.at(input_name);

            if (uu.params.layer_names_to_reshape.find(input_name) !=
                uu.params.layer_names_to_reshape.end()) {
                configureInputReshapeByImage(ii, mm, input_reshape_table);
            }
            cfgInputPreprocessing(trait, ii, mm, input_name,
                                  input_layout, interpolation);

            // Apply the reshape request (if any) before reading the final
            // tensor descriptor below.
            if (!input_reshape_table.empty()) {
                const_cast<IE::CNNNetwork *>(&uu.net)->reshape(input_reshape_table);
            }

            if (uu.net_input_params.is_applicable(mm)) {
                const_cast<IEUnit::InputFramesDesc &>(uu.net_input_params)
                    .set_param(input_name, ii->getTensorDesc());
            }

            for (auto &&p : uu.params.const_inputs) {
                inputs.at(p.first)->setPrecision(toIE(p.second.first.depth()));
            }

            const auto output_layout = broadcastLayerAttr(uu.params.output_layout,
                                                          uu.params.output_names);
            configureOutputLayout(uu.net.getOutputsInfo(), output_layout);
            configureOutputPrecision(uu.net.getOutputsInfo(), uu.params.output_precision);
        } else {
            // Imported networks: only the pre-processing map is filled in.
            GAPI_Assert(uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import);
            auto inputs = uu.this_network.GetInputsInfo();

            auto* non_const_prepm = const_cast<IEUnit::PreProcMap*>(&uu.preproc_map);
            auto ii = inputs.at(input_name);
            const auto explicit_resize = lookUp(interpolation, input_name);
            non_const_prepm->emplace(
                    input_name, createPreProcInfo(trait, mm, explicit_resize));

            if (uu.net_input_params.is_applicable(mm)) {
                const_cast<IEUnit::InputFramesDesc &>(uu.net_input_params)
                    .set_param(input_name, ii->getTensorDesc());
            }
        }

        // One GMatDesc per network output.
        for (const auto &out_name : uu.params.output_names) {
            const auto& desc =
                uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load
                ? uu.net.getOutputsInfo().at(out_name)->getTensorDesc()
                : uu.this_network.GetOutputsInfo().at(out_name)->getTensorDesc();

            cv::GMatDesc outm(toCV(desc.getPrecision()),
                              toCVDims(toCV(desc.getDims()), desc.getLayout()));
            result.emplace_back(outm);
        }
        return result;
    }

    // Runtime: extract the input as a blob (optionally preprocessed with
    // the ROI already applied); when preprocessing has consumed the ROI,
    // set the blob as-is, otherwise set an ROI view of it.
    static void run(std::shared_ptr<IECallContext>  ctx,
                    cv::gimpl::ie::RequestPool     &reqPool) {
        using namespace std::placeholders;
        reqPool.getIdleRequest()->execute(
                IInferExecutor::Task {
                    [ctx](InferenceEngine::InferRequest &req) {
                        GAPI_Assert(ctx->uu.params.num_in == 1);
                        auto&& this_roi = ctx->inArg<cv::detail::OpaqueRef>(0).rref<cv::Rect>();

                        // Slot to keep a preprocessed frame alive for the
                        // duration of the async request.
                        cv::MediaFrame* slot_ptr = ctx->prepareKeepAliveFrameSlot(&req);

                        bool preprocessed = false;
                        IE::Blob::Ptr this_blob =
                            extractBlob(*ctx, 1, cv::gapi::ie::TraitAs::IMAGE,
                                        IE::Layout::ANY,
                                        *(ctx->uu.params.input_names.begin()),
                                        cv::util::make_optional(this_roi),
                                        slot_ptr, &preprocessed);
                        if (!preprocessed) {
                            setROIBlob(req,
                                   *(ctx->uu.params.input_names.begin()),
                                   this_blob, this_roi, *ctx);
                        } else {
                            setBlob(req,
                                    *(ctx->uu.params.input_names.begin()),
                                    this_blob, *ctx);
                        }
                    },
                    std::bind(PostOutputs, _1, _2, ctx)
                }
        );
    }
};
|
|
|
|
|
|
|
|
|
// InferList: runs inference once per ROI from an input cv::GArray<cv::Rect>,
// cropping each ROI out of a single full-frame input image.
// Outputs are cv::GArray<cv::Mat>, one element per ROI.
struct InferList: public cv::detail::KernelTag {
    using API = cv::GInferListBase;
    static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
    static KImpl kernel() { return KImpl{outMeta, run}; }

    // Compile-time meta function: validates input metas, configures the IE
    // network's input preprocessing/reshape and output layout/precision,
    // and reports one empty-array descriptor per network output.
    static cv::GMetaArgs outMeta(const ade::Graph &gr,
                                 const ade::NodeHandle &nh,
                                 const cv::GMetaArgs &in_metas,
                                 const cv::GArgs &) {
        GConstGIEModel gm(gr);
        const auto &uu = gm.metadata(nh).get<IEUnit>();
        // Local working copy: may be extended per-layer below before the
        // actual network reshape is issued.
        IE::ICNNNetwork::InputShapes input_reshape_table = uu.params.reshape_table;

        // in_metas[0] is the ROI list, hence the "- 1u" when matching the
        // network's input layer count against the remaining metas.
        GAPI_Assert(uu.params.input_names.size() == (in_metas.size() - 1u)
                    && "Known input layers count doesn't match input meta count");

        // Expand possibly-single layout/interpolation attributes to all inputs.
        const auto input_layout = broadcastLayerAttr(uu.params.input_layout,
                                                     uu.params.input_names);
        const auto interpolation = broadcastLayerAttr(uu.params.interpolation,
                                                      uu.params.input_names);
        if (uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
            // "Load" flow: the network topology is available, so its inputs
            // can be reconfigured before compilation.
            std::size_t idx = 1u; // skip the ROI-vector meta at index 0
            auto inputs = uu.net.getInputsInfo();
            for (auto &&input_name : uu.params.input_names) {
                auto ii = inputs.at(input_name);
                const auto & mm = in_metas[idx++];

                const auto input_trait = clarifyTrait(mm, ii->getTensorDesc().getDims());
                // ROI cropping only makes sense for image inputs.
                if (input_trait != cv::gapi::ie::TraitAs::IMAGE) {
                    util::throw_error(std::runtime_error(
                        "IE Backend: Only image is supported"
                        " as the " + std::to_string(idx) + "th argument for InferList"));
                }

                if (uu.params.layer_names_to_reshape.find(input_name) !=
                    uu.params.layer_names_to_reshape.end()) {
                    configureInputReshapeByImage(ii, mm, input_reshape_table);
                }
                cfgInputPreprocessing(input_trait, ii, mm,
                                      input_name, input_layout, interpolation);
            }

            // Apply accumulated shape changes in one shot; uu.net is stored
            // const in the unit, but reshape must mutate it here.
            if (!input_reshape_table.empty()) {
                const_cast<IE::CNNNetwork *>(&uu.net)->reshape(input_reshape_table);
            }

            // Constant inputs keep their cv::Mat precision.
            for (auto &&p : uu.params.const_inputs) {
                const auto ii = inputs.at(p.first);
                ii->setPrecision(toIE(p.second.first.depth()));
            }

            const auto output_layout = broadcastLayerAttr(uu.params.output_layout,
                                                          uu.params.output_names);
            configureOutputLayout(uu.net.getOutputsInfo(), output_layout);
            configureOutputPrecision(uu.net.getOutputsInfo(), uu.params.output_precision);
        } else {
            // "Import" flow: pre-compiled blob - inputs can't be reshaped;
            // only per-input preprocessing info is recorded for run-time use.
            GAPI_Assert(uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import);
            std::size_t idx = 1u; // skip the ROI-vector meta at index 0
            auto inputs = uu.this_network.GetInputsInfo();
            // preproc_map is const in the unit but must be filled lazily here.
            auto* non_const_prepm = const_cast<IEUnit::PreProcMap*>(&uu.preproc_map);
            for (auto &&input_name : uu.params.input_names) {
                auto ii = inputs.at(input_name);
                const auto & mm = in_metas[idx++];

                const auto input_trait = clarifyTrait(mm, ii->getTensorDesc().getDims());
                if (input_trait != cv::gapi::ie::TraitAs::IMAGE) {
                    util::throw_error(std::runtime_error(
                        "IE Backend: Only image is supported"
                        " as the " + std::to_string(idx) + "th argument for InferList"));
                }

                const auto explicit_resize = lookUp(interpolation, input_name);
                non_const_prepm->emplace(
                    input_name, createPreProcInfo(input_trait, mm, explicit_resize));
            }
        }

        // Output element shapes are only known at run-time (per ROI), so
        // every output is described as an (empty) dynamic array.
        return cv::GMetaArgs(uu.params.output_names.size(),
                             cv::GMetaArg{cv::empty_array_desc()});
    }

    // Run-time entry: schedules one asynchronous infer request per ROI.
    static void run(std::shared_ptr<IECallContext> ctx,
                    cv::gimpl::ie::RequestPool &reqPool) {
        const auto& in_roi_vec = ctx->inArg<cv::detail::VectorRef>(0u).rref<cv::Rect>();

        // No ROIs: still must emit (empty) outputs with proper meta so the
        // pipeline's downstream bookkeeping stays consistent.
        if (in_roi_vec.empty()) {
            for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
                auto output = ctx->output(i);
                ctx->out.meta(output, ctx->getMeta());
                ctx->out.post(std::move(output));
            }
            return;
        }

        // Wrap the full input image once; per-ROI views are made via
        // setROIBlob below.
        IE::Blob::Ptr this_blob = extractBlob(*ctx, 1, cv::gapi::ie::TraitAs::IMAGE,
                                              IE::Layout::ANY,
                                              ctx->uu.params.input_names[0u],
                                              cv::util::optional<cv::Rect>{});

        // Pre-compute output dims (identical for every ROI) and pre-size the
        // output vectors so async completions can write by index.
        std::vector<std::vector<int>> cached_dims(ctx->uu.params.num_out);
        for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
            const auto& out_name = ctx->uu.params.output_names[i];
            const auto& desc =
                ctx->uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load
                ? ctx->uu.net.getOutputsInfo().at(out_name)->getTensorDesc()
                : ctx->uu.this_network.GetOutputsInfo().at(out_name)->getTensorDesc();
            cached_dims[i] = toCVDims(toCV(desc.getDims()), desc.getLayout());

            auto& out_vec = ctx->outVecR<cv::Mat>(i);
            out_vec.clear();
            out_vec.resize(in_roi_vec.size());
        }

        // Shared completion handler; counts down until all ROIs are done.
        PostOutputsList callback(in_roi_vec.size(), ctx, std::move(cached_dims));
        for (auto&& it : ade::util::indexed(in_roi_vec)) {
            auto pos = ade::util::index(it);
            const auto& rc = ade::util::value(it);
            reqPool.getIdleRequest()->execute(
                IInferExecutor::Task {
                    // Set the ROI view of the input blob on the request...
                    [ctx, rc, this_blob](InferenceEngine::InferRequest &req) {
                        setROIBlob(req, ctx->uu.params.input_names[0u], this_blob, rc, *ctx);
                    },
                    // ...and route completion to this ROI's slot.
                    std::bind(callback, std::placeholders::_1, std::placeholders::_2, pos)
                }
            );
        }
    }
};
|
|
|
|
|
|
// InferList2: list inference where input #0 is a full image and every further
// input is an array whose elements are consumed per-iteration: cv::Rect
// elements select ROIs of input #0, cv::Mat elements are fed as raw tensors.
struct InferList2: public cv::detail::KernelTag {
    using API = cv::GInferList2Base;
    static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
    static KImpl kernel() { return KImpl{outMeta, run}; }

    // Compile-time meta function: validates argument kinds (image + arrays),
    // configures preprocessing for Rect-driven inputs, and reports one
    // empty-array descriptor per network output.
    static cv::GMetaArgs outMeta(const ade::Graph &gr,
                                 const ade::NodeHandle &nh,
                                 const cv::GMetaArgs &in_metas,
                                 const cv::GArgs &) {
        GConstGIEModel gm(gr);
        const auto &uu = gm.metadata(nh).get<IEUnit>();
        // Local working copy, possibly extended per-layer before reshape.
        IE::ICNNNetwork::InputShapes input_reshape_table = uu.params.reshape_table;

        // in_metas[0] is the image, the rest must match the input layers.
        GAPI_Assert(uu.params.input_names.size() == (in_metas.size() - 1u)
                    && "Known input layers count doesn't match input meta count");

        // Op metadata is needed to inspect the declared element kind
        // (Rect vs Mat) of each input array.
        const auto &op = gm.metadata(nh).get<Op>();

        // Describe the 0th (image) argument once - Rect-driven layers crop it.
        const auto &input_name_0 = uu.params.input_names.front();
        const auto &mm_0 = in_metas[0u];
        const auto &tensor_desc_0 =
            (uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load)
            ? uu.net.getInputsInfo().at(input_name_0)->getTensorDesc()
            : uu.this_network.GetInputsInfo().at(input_name_0)->getTensorDesc();

        // The 0th argument must be a Mat or MediaFrame.
        if (!(cv::util::holds_alternative<cv::GMatDesc>(mm_0) ||
              cv::util::holds_alternative<cv::GFrameDesc>(mm_0))) {
            util::throw_error(std::runtime_error(
                "IE Backend: Unsupported input meta"
                " for 0th argument in IE backend"));
        }

        std::size_t idx = 1u; // metas for input layers start after the image
        const auto input_layout = broadcastLayerAttr(uu.params.input_layout,
                                                     uu.params.input_names);
        const auto interpolation = broadcastLayerAttr(uu.params.interpolation,
                                                      uu.params.input_names);
        for (auto &&input_name : uu.params.input_names) {
            const auto &mm = in_metas[idx];
            GAPI_Assert(util::holds_alternative<cv::GArrayDesc>(mm)
                        && "Non-array inputs are not supported");

            if (op.k.inKinds[idx] == cv::detail::OpaqueKind::CV_RECT) {
                // Rect array: elements crop the image argument, so the image
                // trait must really be IMAGE.
                const auto input_trait = clarifyTrait(mm_0, tensor_desc_0.getDims());
                GAPI_Assert(input_trait == cv::gapi::ie::TraitAs::IMAGE
                            && "IE Backend: Only image is supported as the 0th argument for an input array of cv::Rect");

                if (uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) {
                    // "Load" flow: topology available, configure inputs now.
                    auto inputs = uu.net.getInputsInfo();

                    auto ii = inputs.at(input_name);
                    if (uu.params.layer_names_to_reshape.find(input_name) !=
                        uu.params.layer_names_to_reshape.end()) {
                        configureInputReshapeByImage(ii, mm_0, input_reshape_table);
                    }
                    cfgInputPreprocessing(input_trait, ii, mm_0,
                                          input_name, input_layout, interpolation);

                    // Constant inputs keep their cv::Mat precision.
                    for (auto &&p : uu.params.const_inputs) {
                        inputs.at(p.first)->setPrecision(toIE(p.second.first.depth()));
                    }

                    // uu.net is stored const in the unit, but reshape must
                    // mutate it here.
                    if (!input_reshape_table.empty()) {
                        const_cast<IE::CNNNetwork *>(&uu.net)->reshape(input_reshape_table);
                    }
                    const auto output_layout = broadcastLayerAttr(uu.params.output_layout,
                                                                  uu.params.output_names);
                    configureOutputLayout(uu.net.getOutputsInfo(), output_layout);
                    configureOutputPrecision(uu.net.getOutputsInfo(), uu.params.output_precision);
                } else {
                    // "Import" flow: record run-time preprocessing instead.
                    GAPI_Assert(uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import);
                    auto inputs = uu.this_network.GetInputsInfo();
                    // preproc_map is const in the unit but filled lazily here.
                    auto* non_const_prepm = const_cast<IEUnit::PreProcMap*>(&uu.preproc_map);
                    auto ii = inputs.at(input_name);
                    const auto explicit_resize = lookUp(interpolation, input_name);
                    non_const_prepm->emplace(
                        input_name, createPreProcInfo(input_trait, mm_0, explicit_resize));
                }
            } else {
                // Mat array: elements are fed as-is as tensors; explicit
                // resize/layout attributes would be silently ignored, so
                // reject them loudly.
                GAPI_Assert(op.k.inKinds[idx] == cv::detail::OpaqueKind::CV_MAT);

                const auto explicit_resize = lookUp(interpolation, input_name);
                const auto explicit_layout = lookUp(input_layout , input_name);
                if (explicit_resize || explicit_layout) {
                    util::throw_error(std::logic_error(
                        "InferList2 doesn't support preprocessing for \"tensor\"'s arguments!"));
                }
            }
            idx++;
        }

        // Per-element output shapes are only known at run time, so every
        // output is described as an (empty) dynamic array.
        return cv::GMetaArgs(uu.params.output_names.size(),
                             cv::GMetaArg{cv::empty_array_desc()});
    }

    // Run-time entry: schedules one asynchronous infer request per list
    // element, feeding each input layer from the matching array element.
    static void run(std::shared_ptr<IECallContext> ctx,
                    cv::gimpl::ie::RequestPool &reqPool) {
        GAPI_Assert(ctx->inArgs().size() > 1u
                    && "This operation must have at least two arguments");

        // Wrap the full image (argument 0) once; ROI views are made later.
        IE::Blob::Ptr blob_0 = extractBlob(*ctx, 0, cv::gapi::ie::TraitAs::IMAGE,
                                           IE::Layout::ANY,
                                           ctx->uu.params.input_names[0u],
                                           cv::util::optional<cv::Rect>{});
        // All input arrays have the same length; use the first to size runs.
        const auto list_size = ctx->inArg<cv::detail::VectorRef>(1u).size();
        // Empty lists: still emit (empty) outputs with proper meta.
        if (list_size == 0u) {
            for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
                auto output = ctx->output(i);
                ctx->out.meta(output, ctx->getMeta());
                ctx->out.post(std::move(output));
            }
            return;
        }

        // Pre-compute output dims (same for every element) and pre-size the
        // output vectors so async completions can write by index.
        std::vector< std::vector<int> > cached_dims(ctx->uu.params.num_out);
        for (auto i : ade::util::iota(ctx->uu.params.num_out)) {
            const auto& out_name = ctx->uu.params.output_names[i];
            const auto& desc =
                ctx->uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load
                ? ctx->uu.net.getOutputsInfo().at(out_name)->getTensorDesc()
                : ctx->uu.this_network.GetOutputsInfo().at(out_name)->getTensorDesc();
            cached_dims[i] = toCVDims(toCV(desc.getDims()), desc.getLayout());

            auto& out_vec = ctx->outVecR<cv::Mat>(i);
            out_vec.clear();
            out_vec.resize(list_size);
        }

        // Shared completion handler; counts down until all elements are done.
        PostOutputsList callback(list_size, ctx, std::move(cached_dims));
        for (const auto &list_idx : ade::util::iota(list_size)) {
            reqPool.getIdleRequest()->execute(
                IInferExecutor::Task {
                    [ctx, list_idx, list_size, blob_0](InferenceEngine::InferRequest &req) {
                        // Bind every input layer from its array's element at
                        // list_idx - either as an ROI of blob_0 or a tensor.
                        for (auto in_idx : ade::util::iota(ctx->uu.params.num_in)) {
                            const auto &this_vec = ctx->inArg<cv::detail::VectorRef>(in_idx+1u);
                            GAPI_Assert(this_vec.size() == list_size);
                            if (this_vec.getKind() == cv::detail::OpaqueKind::CV_RECT) {
                                const auto &vec = this_vec.rref<cv::Rect>();
                                setROIBlob(req, ctx->uu.params.input_names[in_idx],
                                           blob_0, vec[list_idx], *ctx);
                            } else if (this_vec.getKind() == cv::detail::OpaqueKind::CV_MAT) {
                                const auto &vec = this_vec.rref<cv::Mat>();
                                const auto &mat = vec[list_idx];
                                const auto layer_name = ctx->uu.params.input_names[in_idx];
                                // Reuse the layout IE already expects for
                                // this layer when wrapping the tensor.
                                const auto layout = req.GetBlob(layer_name)->getTensorDesc().getLayout();
                                setBlob(req, layer_name,
                                        wrapIE(mat, cv::gapi::ie::TraitAs::TENSOR, layout),
                                        *ctx);
                            } else {
                                GAPI_Assert(false &&
                                    "Only Rect and Mat types are supported for infer list 2!");
                            }
                        }
                    },
                    // Route completion to this element's slot.
                    std::bind(callback, std::placeholders::_1, std::placeholders::_2, list_idx)
                }
            );
        }
    }
};
|
|
|
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
namespace {
|
|
|
class GIEBackendImpl final: public cv::gapi::GBackend::Priv {
|
|
|
virtual void unpackKernel(ade::Graph &gr,
|
|
|
const ade::NodeHandle &nh,
|
|
|
const cv::GKernelImpl &ii) override {
|
|
|
using namespace cv::gimpl;
|
|
|
|
|
|
|
|
|
GIEModel gm(gr);
|
|
|
auto &np = gm.metadata(nh).get<NetworkParams>();
|
|
|
auto &pp = cv::util::any_cast<cv::gapi::ie::detail::ParamDesc>(np.opaque);
|
|
|
const auto &ki = cv::util::any_cast<KImpl>(ii.opaque);
|
|
|
|
|
|
GModel::Graph model(gr);
|
|
|
auto& op = model.metadata(nh).get<Op>();
|
|
|
|
|
|
|
|
|
if (pp.is_generic)
|
|
|
{
|
|
|
auto& info = cv::util::any_cast<cv::detail::InOutInfo>(op.params);
|
|
|
pp.input_names = info.in_names;
|
|
|
pp.output_names = info.out_names;
|
|
|
pp.num_in = info.in_names.size();
|
|
|
pp.num_out = info.out_names.size();
|
|
|
}
|
|
|
|
|
|
gm.metadata(nh).set(IEUnit{pp});
|
|
|
gm.metadata(nh).set(IECallable{ki.run});
|
|
|
gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc});
|
|
|
}
|
|
|
|
|
|
virtual EPtr compile(const ade::Graph &graph,
|
|
|
const cv::GCompileArgs &,
|
|
|
const std::vector<ade::NodeHandle> &nodes) const override {
|
|
|
return EPtr{new cv::gimpl::ie::GIEExecutable(graph, nodes)};
|
|
|
}
|
|
|
|
|
|
virtual cv::GKernelPackage auxiliaryKernels() const override {
|
|
|
return cv::gapi::kernels< cv::gimpl::ie::Infer
|
|
|
, cv::gimpl::ie::InferROI
|
|
|
, cv::gimpl::ie::InferList
|
|
|
, cv::gimpl::ie::InferList2
|
|
|
>();
|
|
|
}
|
|
|
|
|
|
virtual bool controlsMerge() const override {
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
virtual bool allowsMerge(const cv::gimpl::GIslandModel::Graph &,
|
|
|
const ade::NodeHandle &,
|
|
|
const ade::NodeHandle &,
|
|
|
const ade::NodeHandle &) const override {
|
|
|
return false;
|
|
|
}
|
|
|
};
|
|
|
}
|
|
|
|
|
|
// Public accessor for the IE backend; the instance is created once and
// shared by all pipelines (Meyers singleton).
cv::gapi::GBackend cv::gapi::ie::backend() {
    static cv::gapi::GBackend instance(std::make_shared<GIEBackendImpl>());
    return instance;
}
|
|
|
|
|
|
// Build a cv::Mat header over the IE blob's buffer (no data copy);
// dimensions and element type are taken from the blob's tensor descriptor.
cv::Mat cv::gapi::ie::util::to_ocv(IE::Blob::Ptr blob) {
    const auto &desc  = blob->getTensorDesc();
    const auto  dims  = toCV(desc.getDims());
    const auto  type  = toCV(desc.getPrecision());
    auto       *bytes = blob->buffer().as<uint8_t*>();
    return cv::Mat(dims, type, bytes);
}
|
|
|
|
|
|
// Convert an IE dimension vector into OpenCV's int-based dims vector.
std::vector<int> cv::gapi::ie::util::to_ocv(const IE::SizeVector &dims) {
    std::vector<int> cv_dims = toCV(dims);
    return cv_dims;
}
|
|
|
|
|
|
// Wrap a cv::Mat as an IE blob, treating the data as an image.
IE::Blob::Ptr cv::gapi::ie::util::to_ie(const cv::Mat &blob) {
    constexpr auto trait = cv::gapi::ie::TraitAs::IMAGE;
    return wrapIE(blob, trait);
}
|
|
|
|
|
|
// Combine separate Y and UV planes into a single IE NV12 compound blob.
// Note: NV12 blob support was removed with the OpenVINO 1.0 API deprecation,
// so post-2023.0 builds throw instead.
IE::Blob::Ptr cv::gapi::ie::util::to_ie(const cv::Mat &y_plane, const cv::Mat &uv_plane) {
    auto y_blob = wrapIE(y_plane, cv::gapi::ie::TraitAs::IMAGE);
    auto uv_blob = wrapIE(uv_plane, cv::gapi::ie::TraitAs::IMAGE);
#if INF_ENGINE_RELEASE > 2023000000
    // NV12Blob is gone in newer releases - fail explicitly.
    cv::util::throw_error(std::logic_error(
        "IE Backend: NV12 feature has been deprecated in OpenVINO 1.0 API."
        " The last version which supports this is 2023.0"));
#elif INF_ENGINE_RELEASE >= 2021010000
    return IE::make_shared_blob<IE::NV12Blob>(y_blob, uv_blob);
#else
    // Pre-2021.1 headers require the fully-qualified type name.
    return IE::make_shared_blob<InferenceEngine::NV12Blob>(y_blob, uv_blob);
#endif
}
|
|
|
|
|
|
#else
|
|
|
|
|
|
// Stub used when OpenCV is built without OpenVINO IE support (or with an
// unsupported IE release): any attempt to use the backend fails loudly.
cv::gapi::GBackend cv::gapi::ie::backend() {
    util::throw_error(std::runtime_error("G-API has been compiled without OpenVINO IE support"));
}
|
|
|
#endif
|
|
|
|