Commit 0e1ef8f8 authored by Lubov Batanina, committed by Alexander Alekhin

Merge pull request #15184 from l-bat:IE_R2

Support new IE API (#15184)

* Add OpenVINO R2 support for layers

* Add Core API

* Fix tests

* Fix expectNoFallbacksFromIE for ONNX nets

* Remove deprecated API

* Remove td

* Remove TargetDevice

* Fix Async

* Add test

* Fix detectMyriadX

* Fix test

* Fix warning
parent cf93a05d
@@ -713,21 +713,23 @@ struct DataLayer : public Layer
     CV_Assert(numChannels <= 4);
     // Scale
-    auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                            {numChannels});
+    InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
+                                   InferenceEngine::Layout::C);
+    auto weights = InferenceEngine::make_shared_blob<float>(td);
     weights->allocate();
-    weights->set(std::vector<float>(numChannels, scaleFactors[0]));
+    float* weight_buf = weights->buffer().as<float*>();
+    std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);
     // Mean subtraction
-    auto biases = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                           {numChannels});
+    auto biases = InferenceEngine::make_shared_blob<float>(td);
     biases->allocate();
-    std::vector<float> biasesVec(numChannels);
+    float* bias_buf = biases->buffer().as<float*>();
     for (int i = 0; i < numChannels; ++i)
     {
-        biasesVec[i] = -means[0][i] * scaleFactors[0];
+        bias_buf[i] = -means[0][i] * scaleFactors[0];
     }
-    biases->set(biasesVec);
     InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
     addConstantData("weights", weights, ieLayer);
@@ -1473,7 +1475,11 @@ struct Net::Impl
         for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
         {
             InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
             dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
+#else
+            dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
+#endif
         }
     }
     else
@@ -1481,7 +1487,11 @@ struct Net::Impl
         for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
         {
             InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
             dataPtr->name = ld.name;
+#else
+            dataPtr->setName(ld.name);
+#endif
         }
     }
 }
@@ -1502,7 +1512,11 @@ struct Net::Impl
         for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
         {
             InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
             dataPtr->name = netInputLayer->outNames[i];
+#else
+            dataPtr->setName(netInputLayer->outNames[i]);
+#endif
         }
     }
     else
@@ -1510,7 +1524,11 @@ struct Net::Impl
         for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
         {
             InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
             dataPtr->name = ld.name;
+#else
+            dataPtr->setName(ld.name);
+#endif
         }
     }
     ieNode->net->addBlobs(ld.inputBlobsWrappers);
......
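The same field-versus-accessor switch appears four times in this file and again in the backend code further down; a hypothetical helper (not part of the patch) shows the guard in isolation:

    // Illustrative only: the pattern used wherever a Data node is renamed.
    static void setDataName(const InferenceEngine::DataPtr& dataPtr, const std::string& name)
    {
    #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
        dataPtr->name = name;        // up to 2019 R1: Data::name is a public field
    #else
        dataPtr->setName(name);      // 2019 R2: the field is replaced by an accessor
    #endif
    }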
@@ -111,7 +111,8 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(!input->dims.empty());
+        std::vector<size_t> dims = input->getDims();
+        CV_Assert(!dims.empty());
         InferenceEngine::Builder::Layer ieLayer(name);
         ieLayer.setName(name);
@@ -122,12 +123,10 @@ public:
         else
         {
             ieLayer.setType("Split");
-            ieLayer.getParameters()["axis"] = input->dims.size() - 1;
-            ieLayer.getParameters()["out_sizes"] = input->dims[0];
+            ieLayer.getParameters()["axis"] = dims.size() - 1;
+            ieLayer.getParameters()["out_sizes"] = dims[0];
         }
-        std::vector<size_t> shape(input->dims);
-        std::reverse(shape.begin(), shape.end());
-        ieLayer.setInputPorts({InferenceEngine::Port(shape)});
+        ieLayer.setInputPorts({InferenceEngine::Port(dims)});
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
......
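One detail worth spelling out in the hunk above: the old input->dims reported the shape in reversed (minor-to-major) order, which is why the code had to std::reverse it before building a Port. Judging from this patch, DataPtr::getDims() already returns the natural NCHW-style order, so the reversal, and the reversed-order indexing in the layers below, simply drops out:

    // Assumption drawn from the hunks in this patch, not from separate documentation:
    std::vector<size_t> dims = input->getDims();             // e.g. {N, C, H, W}
    ieLayer.setInputPorts({InferenceEngine::Port(dims)});    // no std::reverse needed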
@@ -304,7 +304,7 @@ public:
     InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
     InferenceEngine::Builder::ConcatLayer ieLayer(name);
-    ieLayer.setAxis(clamp(axis, input->dims.size()));
+    ieLayer.setAxis(clamp(axis, input->getDims().size()));
     ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
     return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 }
......
@@ -465,14 +465,13 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(input->dims.size() == 4 || input->dims.size() == 5);
-        const int inpCn = input->dims[input->dims.size() - 2];  // NOTE: input->dims are reversed (WHIO or WHDIO)
+        std::vector<size_t> dims = input->getDims();
+        CV_Assert(dims.size() == 4 || dims.size() == 5);
+        const int inpCn = dims[1];
         const int outCn = blobs[0].size[0];
         const int inpGroupCn = blobs[0].size[1];
         const int group = inpCn / inpGroupCn;
-        InferenceEngine::Layout layout = (input->dims.size() == 4) ? InferenceEngine::Layout::OIHW :
+        InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
                                                                      InferenceEngine::Layout::NCDHW;
         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
@@ -485,9 +484,10 @@ public:
         }
         else
         {
-            ieWeights = InferenceEngine::make_shared_blob<float>(
-                            InferenceEngine::Precision::FP32, layout,
-                            ieWeights->dims());
+            ieWeights = InferenceEngine::make_shared_blob<float>({
+                            InferenceEngine::Precision::FP32,
+                            ieWeights->getTensorDesc().getDims(), layout
+                        });
             ieWeights->allocate();
             Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
@@ -1877,9 +1877,10 @@ public:
     auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
     if (fusedWeights)
     {
-        ieWeights = InferenceEngine::make_shared_blob<float>(
-                        InferenceEngine::Precision::FP32, layout,
-                        ieWeights->dims());
+        ieWeights = InferenceEngine::make_shared_blob<float>({
+                        InferenceEngine::Precision::FP32,
+                        ieWeights->getTensorDesc().getDims(), layout
+                    });
         ieWeights->allocate();
         int inpCn = blobs[0].size[0];
......
@@ -261,7 +261,8 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        if (input->dims.size() == 4)
+        std::vector<size_t> dims = input->getDims();
+        if (dims.size() == 4)
         {
             InferenceEngine::Builder::NormalizeLayer ieLayer(name);
@@ -270,13 +271,14 @@ public:
             ieLayer.setEpsilon(epsilon);
             InferenceEngine::Builder::Layer l = ieLayer;
-            const int numChannels = input->dims[2];  // NOTE: input->dims are reversed (whcn)
+            const int numChannels = dims[1];
             InferenceEngine::Blob::Ptr weights;
             if (blobs.empty())
             {
-                weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                   InferenceEngine::Layout::C,
-                                                                   {(size_t)numChannels});
+                weights = InferenceEngine::make_shared_blob<float>({
+                              InferenceEngine::Precision::FP32,
+                              {(size_t)numChannels}, InferenceEngine::Layout::C
+                          });
                 weights->allocate();
                 Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
......
@@ -166,9 +166,11 @@ public:
         if (kernel_size.size() == 3)
             return preferableTarget == DNN_TARGET_CPU;
         if (preferableTarget == DNN_TARGET_MYRIAD) {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
             if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2)) {
                 return !isMyriadX();
             }
+#endif
             return type == MAX || type == AVE;
         }
         else
......
@@ -207,12 +207,13 @@ public:
         }
         else
         {
-            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                    {numChannels});
+            auto weights = InferenceEngine::make_shared_blob<float>({
+                               InferenceEngine::Precision::FP32, {(size_t)numChannels},
+                               InferenceEngine::Layout::C
+                           });
             weights->allocate();
-            std::vector<float> ones(numChannels, 1);
-            weights->set(ones);
+            float* buf = weights->buffer().as<float*>();
+            std::fill(buf, buf + numChannels, 1);
             addConstantData("weights", weights, l);
         }
         if (hasBias)
......
@@ -301,14 +301,14 @@ public:
     {
         std::vector<size_t> outShape(numDims);
         for (int i = 0; i < numDims; ++i)
-            outShape[numDims - 1 - i] = sliceRanges[0][i].size();
+            outShape[i] = sliceRanges[0][i].size();
         ieLayer.getInputPorts()[1].setParameter("type", "weights");
-        // Fake blob which will be moved to inputs (as weights).
-        auto shapeSource = InferenceEngine::make_shared_blob<float>(
-                               InferenceEngine::Precision::FP32,
-                               InferenceEngine::Layout::ANY, outShape);
+        auto shapeSource = InferenceEngine::make_shared_blob<float>({
+                               InferenceEngine::Precision::FP32, outShape,
+                               InferenceEngine::Layout::ANY
+                           });
         shapeSource->allocate();
         addConstantData("weights", shapeSource, ieLayer);
     }
......
@@ -315,7 +315,8 @@ public:
     InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
     InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-    ieLayer.setAxis(clamp(axisRaw, input->dims.size()));
+    ieLayer.setAxis(clamp(axisRaw, input->getDims().size()));
     return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 }
 #endif  // HAVE_INF_ENGINE
......
@@ -45,13 +45,13 @@ infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
 InfEngineBackendNet::InfEngineBackendNet() : netBuilder("")
 {
     hasNetOwner = false;
-    targetDevice = InferenceEngine::TargetDevice::eCPU;
+    device_name = "CPU";
 }
 InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net) : netBuilder(""), cnn(net)
 {
     hasNetOwner = true;
-    targetDevice = InferenceEngine::TargetDevice::eCPU;
+    device_name = "CPU";
 }
 void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& inputs,
@@ -66,16 +66,13 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
     for (size_t i = 0; i < inpWrappers.size(); ++i)
     {
         const auto& inp = inpWrappers[i];
-        const std::string& inpName = inp->dataPtr->name;
+        const std::string& inpName = inp->dataPtr->getName();
         int inpId;
         it = layers.find(inpName);
         if (it == layers.end())
         {
             InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
-            std::vector<size_t> shape(inp->blob->dims());
-            std::reverse(shape.begin(), shape.end());
+            std::vector<size_t> shape(inp->blob->getTensorDesc().getDims());
             inpLayer.setPort(InferenceEngine::Port(shape));
             inpId = netBuilder.addLayer(inpLayer);
@@ -89,7 +86,11 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
     }
     CV_Assert(!outputs.empty());
     InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]);
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
     dataPtr->name = layerName;
+#else
+    dataPtr->setName(layerName);
+#endif
 }
 void InfEngineBackendNet::init(int targetId)
@@ -116,20 +117,21 @@ void InfEngineBackendNet::init(int targetId)
     switch (targetId)
     {
     case DNN_TARGET_CPU:
-        targetDevice = InferenceEngine::TargetDevice::eCPU;
+        device_name = "CPU";
         break;
-    case DNN_TARGET_OPENCL: case DNN_TARGET_OPENCL_FP16:
-        targetDevice = InferenceEngine::TargetDevice::eGPU;
+    case DNN_TARGET_OPENCL:
+    case DNN_TARGET_OPENCL_FP16:
+        device_name = "GPU";
         break;
     case DNN_TARGET_MYRIAD:
-        targetDevice = InferenceEngine::TargetDevice::eMYRIAD;
+        device_name = "MYRIAD";
         break;
     case DNN_TARGET_FPGA:
-        targetDevice = InferenceEngine::TargetDevice::eFPGA;
+        device_name = "FPGA";
         break;
     default:
-        CV_Error(Error::StsError, format("Unknown target identifier: %d", targetId));
-    }
+        CV_Error(Error::StsNotImplemented, "Unknown target");
+    };
     for (const auto& name : requestedOutputs)
     {
@@ -141,14 +143,14 @@ void InfEngineBackendNet::init(int targetId)
         const std::string& name = it.first;
         auto blobIt = allBlobs.find(name);
         CV_Assert(blobIt != allBlobs.end());
-        it.second->setPrecision(blobIt->second->precision());
+        it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());
     }
     for (const auto& it : cnn.getOutputsInfo())
     {
         const std::string& name = it.first;
         auto blobIt = allBlobs.find(name);
         CV_Assert(blobIt != allBlobs.end());
-        it.second->setPrecision(blobIt->second->precision());  // Should be always FP32
+        it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());  // Should be always FP32
     }
     initPlugin(cnn);
@@ -223,16 +225,13 @@ static InferenceEngine::Layout estimateLayout(const Mat& m)
 static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
 {
-    std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
-    std::reverse(reversedShape.begin(), reversedShape.end());
+    std::vector<size_t> shape(&m.size[0], &m.size[0] + m.dims);
     if (m.type() == CV_32F)
-        return InferenceEngine::DataPtr(
-            new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32, estimateLayout(m))
-        );
+        return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
+               {InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
     else if (m.type() == CV_8U)
-        return InferenceEngine::DataPtr(
-            new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::U8, estimateLayout(m))
-        );
+        return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
+               {InferenceEngine::Precision::U8, shape, estimateLayout(m)}));
     else
         CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type()));
 }
@@ -241,33 +240,33 @@ InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<s
                                                InferenceEngine::Layout layout)
 {
     if (m.type() == CV_32F)
-        return InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                        layout, shape, (float*)m.data);
+        return InferenceEngine::make_shared_blob<float>(
+            {InferenceEngine::Precision::FP32, shape, layout}, (float*)m.data);
     else if (m.type() == CV_8U)
-        return InferenceEngine::make_shared_blob<uint8_t>(InferenceEngine::Precision::U8,
-                                                          layout, shape, (uint8_t*)m.data);
+        return InferenceEngine::make_shared_blob<uint8_t>(
+            {InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data);
     else
         CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type()));
 }
 InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout)
 {
-    std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
-    std::reverse(reversedShape.begin(), reversedShape.end());
-    return wrapToInfEngineBlob(m, reversedShape, layout);
+    std::vector<size_t> shape(&m.size[0], &m.size[0] + m.dims);
+    return wrapToInfEngineBlob(m, shape, layout);
 }
 InferenceEngine::Blob::Ptr cloneBlob(const InferenceEngine::Blob::Ptr& blob)
 {
-    InferenceEngine::Precision precision = blob->precision();
     InferenceEngine::Blob::Ptr copy;
+    auto description = blob->getTensorDesc();
+    InferenceEngine::Precision precision = description.getPrecision();
     if (precision == InferenceEngine::Precision::FP32)
     {
-        copy = InferenceEngine::make_shared_blob<float>(precision, blob->layout(), blob->dims());
+        copy = InferenceEngine::make_shared_blob<float>(description);
     }
     else if (precision == InferenceEngine::Precision::U8)
     {
-        copy = InferenceEngine::make_shared_blob<uint8_t>(precision, blob->layout(), blob->dims());
+        copy = InferenceEngine::make_shared_blob<uint8_t>(description);
     }
     else
         CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
@@ -296,10 +295,8 @@ InfEngineBackendWrapper::InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper)
     Ptr<InfEngineBackendWrapper> ieWrapper = wrapper.dynamicCast<InfEngineBackendWrapper>();
     CV_Assert(!ieWrapper.empty());
     InferenceEngine::DataPtr srcData = ieWrapper->dataPtr;
-    dataPtr = InferenceEngine::DataPtr(
-        new InferenceEngine::Data(srcData->name, srcData->dims, srcData->precision,
-                                  srcData->layout)
-    );
+    dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc()));
     blob = ieWrapper->blob;
 }
@@ -323,12 +320,19 @@ void InfEngineBackendWrapper::setHostDirty()
 }
-static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
+static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
 {
-    static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
+    static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
     return sharedPlugins;
 }
+#else
+static InferenceEngine::Core& getCore()
+{
+    static InferenceEngine::Core core;
+    return core;
+}
+#endif
 #if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
 static bool detectMyriadX_()
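Everything below funnels device access through this lazily constructed Core singleton instead of the old per-device plugin map. A minimal sketch of the new flow, assuming OpenVINO 2019 R2 and an already built CNNNetwork named cnn (variable names are illustrative, not taken from the patch):

    // Sketch: load and run a network through the shared Core object.
    InferenceEngine::Core& core = getCore();        // process-wide singleton, as defined above
    InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(cnn, "CPU");
    InferenceEngine::InferRequest req = exec.CreateInferRequest();
    req.Infer();                                    // synchronous forward pass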
@@ -361,24 +365,29 @@ static bool detectMyriadX_()
     InferenceEngine::CNNNetwork cnn = InferenceEngine::CNNNetwork(
                                       InferenceEngine::Builder::convertToICNNNetwork(builder.build()));
-    InferenceEngine::TargetDevice device = InferenceEngine::TargetDevice::eMYRIAD;
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
     InferenceEngine::InferenceEnginePluginPtr enginePtr;
     {
         AutoLock lock(getInitializationMutex());
         auto& sharedPlugins = getSharedPlugins();
-        auto pluginIt = sharedPlugins.find(device);
+        auto pluginIt = sharedPlugins.find("MYRIAD");
         if (pluginIt != sharedPlugins.end()) {
             enginePtr = pluginIt->second;
         } else {
             auto dispatcher = InferenceEngine::PluginDispatcher({""});
-            enginePtr = dispatcher.getSuitablePlugin(device);
-            sharedPlugins[device] = enginePtr;
+            enginePtr = dispatcher.getPluginByDevice("MYRIAD");
+            sharedPlugins["MYRIAD"] = enginePtr;
         }
     }
     auto plugin = InferenceEngine::InferencePlugin(enginePtr);
     try
     {
         auto netExec = plugin.LoadNetwork(cnn, {{"VPU_PLATFORM", "VPU_2480"}});
+#else
+    try
+    {
+        auto netExec = getCore().LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
+#endif
         auto infRequest = netExec.CreateInferRequest();
     } catch(...) {
         return false;
@@ -387,38 +396,41 @@ static bool detectMyriadX_()
 }
 #endif  // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
-void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
+void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
 {
     CV_Assert(!isInitialized());
     try
     {
         AutoLock lock(getInitializationMutex());
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
         auto& sharedPlugins = getSharedPlugins();
-        auto pluginIt = sharedPlugins.find(targetDevice);
+        auto pluginIt = sharedPlugins.find(device_name);
         if (pluginIt != sharedPlugins.end())
         {
             enginePtr = pluginIt->second;
         }
         else
+#endif
         {
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
             auto dispatcher = InferenceEngine::PluginDispatcher({""});
-            if (targetDevice == InferenceEngine::TargetDevice::eFPGA)
+            if (device_name == "FPGA")
                 enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
             else
-                enginePtr = dispatcher.getSuitablePlugin(targetDevice);
-            sharedPlugins[targetDevice] = enginePtr;
+                enginePtr = dispatcher.getPluginByDevice(device_name);
+            sharedPlugins[device_name] = enginePtr;
+#else
+            isInit = true;
+#endif
             std::vector<std::string> candidates;
             std::string param_pluginPath = utils::getConfigurationParameterString("OPENCV_DNN_IE_EXTRA_PLUGIN_PATH", "");
             if (!param_pluginPath.empty())
             {
                 candidates.push_back(param_pluginPath);
             }
-            if (targetDevice == InferenceEngine::TargetDevice::eCPU ||
-                targetDevice == InferenceEngine::TargetDevice::eFPGA)
+            if (device_name == "CPU" || device_name == "FPGA")
             {
                 std::string suffixes[] = {"_avx2", "_sse4", ""};
                 bool haveFeature[] = {
@@ -448,7 +460,12 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
                 {
                     InferenceEngine::IExtensionPtr extension =
                         InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName);
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
                     enginePtr->AddExtension(extension, 0);
+#else
+                    getCore().AddExtension(extension, "CPU");
+#endif
                     CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
                     found = true;
                     break;
@@ -462,14 +479,24 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
             // Some of networks can work without a library of extra layers.
 #ifndef _WIN32
             // Limit the number of CPU threads.
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
             enginePtr->SetConfig({{
                 InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
             }}, 0);
+#else
+            if (device_name == "CPU")
+                getCore().SetConfig({{
+                    InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
+                }}, device_name);
+#endif
 #endif
         }
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
         plugin = InferenceEngine::InferencePlugin(enginePtr);
         netExec = plugin.LoadNetwork(net, {});
+#else
+        netExec = getCore().LoadNetwork(net, device_name);
+#endif
     }
     catch (const std::exception& ex)
     {
@@ -479,7 +506,11 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
 bool InfEngineBackendNet::isInitialized()
 {
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
     return (bool)enginePtr;
+#else
+    return isInit;
+#endif
 }
 void InfEngineBackendNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
@@ -487,7 +518,7 @@ void InfEngineBackendNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >&
     auto wrappers = infEngineWrappers(ptrs);
     for (const auto& wrapper : wrappers)
     {
-        std::string name = wrapper->dataPtr->name;
+        std::string name = wrapper->dataPtr->getName();
         name = name.empty() ? kDefaultInpLayerName : name;
         allBlobs.insert({name, wrapper->blob});
     }
@@ -502,7 +533,7 @@ void InfEngineBackendNet::InfEngineReqWrapper::makePromises(const std::vector<Pt
     for (int i = 0; i < outs.size(); ++i)
     {
         outs[i]->futureMat = outProms[i].getArrayResult();
-        outsNames[i] = outs[i]->dataPtr->name;
+        outsNames[i] = outs[i]->dataPtr->getName();
     }
 }
@@ -626,11 +657,12 @@ void InfEngineBackendNet::forward(const std::vector<Ptr<BackendWrapper> >& outBl
 Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
 {
     // NOTE: Inference Engine sizes are reversed.
-    std::vector<size_t> dims = blob->dims();
-    std::vector<int> size(dims.rbegin(), dims.rend());
+    std::vector<size_t> dims = blob->getTensorDesc().getDims();
+    std::vector<int> size(dims.begin(), dims.end());
+    auto precision = blob->getTensorDesc().getPrecision();
     int type = -1;
-    switch (blob->precision())
+    switch (precision)
     {
     case InferenceEngine::Precision::FP32: type = CV_32F; break;
     case InferenceEngine::Precision::U8: type = CV_8U; break;
@@ -684,7 +716,10 @@ void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArra
 InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
 {
-    auto halfs = InferenceEngine::make_shared_blob<int16_t>(InferenceEngine::Precision::FP16, blob->layout(), blob->dims());
+    auto halfs = InferenceEngine::make_shared_blob<int16_t>({
+        InferenceEngine::Precision::FP16, blob->getTensorDesc().getDims(),
+        blob->getTensorDesc().getLayout()
+    });
     halfs->allocate();
     Mat floatsData(1, blob->size(), CV_32F, blob->buffer());
     Mat halfsData(1, blob->size(), CV_16SC1, halfs->buffer());
@@ -731,7 +766,11 @@ void resetMyriadDevice()
 {
 #ifdef HAVE_INF_ENGINE
     AutoLock lock(getInitializationMutex());
-    getSharedPlugins().erase(InferenceEngine::TargetDevice::eMYRIAD);
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
+    getSharedPlugins().erase("MYRIAD");
+#else
+    getCore().UnregisterPlugin("MYRIAD");
+#endif
 #endif  // HAVE_INF_ENGINE
 }
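Together with the loading path above, the remaining per-plugin operations move onto the same Core instance as per-device calls. A hedged recap of the calls this patch relies on (2019 R2 names; libName stands for whichever extensions library was located, as in the code above):

    // Recap of the Core-based calls used in the new code paths.
    InferenceEngine::Core& core = getCore();
    core.SetConfig({{InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "8"}}, "CPU");
    core.AddExtension(InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName), "CPU");
    core.UnregisterPlugin("MYRIAD");   // what resetMyriadDevice() now does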
......
@@ -92,18 +92,22 @@ public:
     void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                  bool isAsync);
-    void initPlugin(InferenceEngine::ICNNNetwork& net);
+    void initPlugin(InferenceEngine::CNNNetwork& net);
     void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
 private:
     InferenceEngine::Builder::Network netBuilder;
-    InferenceEngine::InferenceEnginePluginPtr enginePtr;
-    InferenceEngine::InferencePlugin plugin;
     InferenceEngine::ExecutableNetwork netExec;
     InferenceEngine::BlobMap allBlobs;
-    InferenceEngine::TargetDevice targetDevice;
+    std::string device_name;
+#if INF_ENGINE_VER_MAJOR_LE(2019010000)
+    InferenceEngine::InferenceEnginePluginPtr enginePtr;
+    InferenceEngine::InferencePlugin plugin;
+#else
+    bool isInit = false;
+#endif
     struct InfEngineReqWrapper
     {
......
@@ -136,13 +136,10 @@ static const std::vector<std::string> getOpenVINOTestModelsList()
 static inline void genData(const std::vector<size_t>& dims, Mat& m, Blob::Ptr& dataPtr)
 {
-    std::vector<int> reversedDims(dims.begin(), dims.end());
-    std::reverse(reversedDims.begin(), reversedDims.end());
-    m.create(reversedDims, CV_32F);
+    m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
     randu(m, -1, 1);
-    dataPtr = make_shared_blob<float>(Precision::FP32, dims, (float*)m.data);
+    dataPtr = make_shared_blob<float>({Precision::FP32, dims, Layout::ANY}, (float*)m.data);
 }
 void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
@@ -154,32 +151,42 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     CNNNetwork net = reader.getNetwork();
+    std::string device_name;
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+    Core ie;
+#else
     InferenceEnginePluginPtr enginePtr;
     InferencePlugin plugin;
+#endif
     ExecutableNetwork netExec;
     InferRequest infRequest;
     try
     {
-        auto dispatcher = InferenceEngine::PluginDispatcher({""});
         switch (target)
         {
             case DNN_TARGET_CPU:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eCPU);
+                device_name = "CPU";
                 break;
             case DNN_TARGET_OPENCL:
             case DNN_TARGET_OPENCL_FP16:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eGPU);
+                device_name = "GPU";
                 break;
             case DNN_TARGET_MYRIAD:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eMYRIAD);
+                device_name = "MYRIAD";
                 break;
            case DNN_TARGET_FPGA:
-                enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
+                device_name = "FPGA";
                 break;
             default:
                 CV_Error(Error::StsNotImplemented, "Unknown target");
         };
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
+        auto dispatcher = InferenceEngine::PluginDispatcher({""});
+        enginePtr = dispatcher.getPluginByDevice(device_name);
+#endif
         if (target == DNN_TARGET_CPU || target == DNN_TARGET_FPGA)
         {
             std::string suffixes[] = {"_avx2", "_sse4", ""};
@@ -202,16 +209,23 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
                 try
                 {
                     IExtensionPtr extension = make_so_pointer<IExtension>(libName);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+                    ie.AddExtension(extension, device_name);
+#else
                     enginePtr->AddExtension(extension, 0);
+#endif
                     break;
                 }
                 catch(...) {}
             }
             // Some of networks can work without a library of extra layers.
         }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+        netExec = ie.LoadNetwork(net, device_name);
+#else
         plugin = InferencePlugin(enginePtr);
         netExec = plugin.LoadNetwork(net, {});
+#endif
         infRequest = netExec.CreateInferRequest();
     }
     catch (const std::exception& ex)
@@ -224,7 +238,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     BlobMap inputBlobs;
     for (auto& it : net.getInputsInfo())
     {
-        genData(it.second->getDims(), inputsMap[it.first], inputBlobs[it.first]);
+        genData(it.second->getTensorDesc().getDims(), inputsMap[it.first], inputBlobs[it.first]);
     }
     infRequest.SetInput(inputBlobs);
@@ -233,7 +247,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     BlobMap outputBlobs;
     for (auto& it : net.getOutputsInfo())
     {
-        genData(it.second->dims, outputsMap[it.first], outputBlobs[it.first]);
+        genData(it.second->getTensorDesc().getDims(), outputsMap[it.first], outputBlobs[it.first]);
     }
     infRequest.SetOutput(outputBlobs);
......
@@ -467,6 +467,42 @@ INSTANTIATE_TEST_CASE_P(/**/, Async, Combine(
     Values(CV_32F, CV_8U),
     testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
 ));
+typedef testing::TestWithParam<Target> Test_Model_Optimizer;
+TEST_P(Test_Model_Optimizer, forward_two_nets)
+{
+    const int target = GetParam();
+    const std::string suffix = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    const std::string& model = findDataFile("dnn/layers/layer_convolution" + suffix + ".bin");
+    const std::string& proto = findDataFile("dnn/layers/layer_convolution" + suffix + ".xml");
+    Net net0 = readNet(model, proto);
+    net0.setPreferableTarget(target);
+    Net net1 = readNet(model, proto);
+    net1.setPreferableTarget(target);
+    // Generate inputs.
+    int blobSize[] = {2, 6, 75, 113};
+    Mat input(4, &blobSize[0], CV_32F);
+    randu(input, 0, 255);
+    net0.setInput(input);
+    Mat ref0 = net0.forward().clone();
+    net1.setInput(input);
+    Mat ref1 = net1.forward();
+    net0.setInput(input);
+    Mat ref2 = net0.forward();
+    normAssert(ref0, ref2, 0, 0);
+}
+INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
+    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+);
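The new forward_two_nets case loads the same convolution IR into two separate Nets and interleaves their forward passes, checking that the first network still produces identical output after the second has run; with every network now going through the shared Core, this presumably guards against one loaded network clobbering another's state.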
 #endif  // HAVE_INF_ENGINE
 }}  // namespace