Commit 0e1ef8f8 authored by Lubov Batanina, committed by Alexander Alekhin

Merge pull request #15184 from l-bat:IE_R2

Support new IE API (#15184)

* Add support for OpenVINO R2 in layers

* Add Core API

* Fix tests

* Fix expectNoFallbacksFromIE for ONNX nets

* Remove deprecated API

* Remove td

* Remove TargetDevice

* Fix Async

* Add test

* Fix detectMyriadX

* Fix test

* Fix warning
parent cf93a05d
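Most of the hunks below follow one migration pattern: in the OpenVINO 2019 R2 API a blob is described by an InferenceEngine::TensorDesc (precision, dims, layout) instead of separate constructor arguments, and data is written through buffer() because Blob::set() is gone. A minimal sketch of the new style, assuming the 2019 R2 headers; the helper name is illustrative and not part of the patch:

    #include <algorithm>
    #include <inference_engine.hpp>

    // Hypothetical helper: a 1-D FP32 blob of length n filled with `value`,
    // created through the TensorDesc-based constructor this PR migrates to.
    static InferenceEngine::Blob::Ptr makeFilledBlob(size_t n, float value)
    {
        InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32,
                                       {n}, InferenceEngine::Layout::C);
        auto blob = InferenceEngine::make_shared_blob<float>(td);
        blob->allocate();
        float* buf = blob->buffer().as<float*>();  // replaces the removed blob->set(...)
        std::fill(buf, buf + n, value);
        return blob;
    }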
......@@ -713,21 +713,23 @@ struct DataLayer : public Layer
         CV_Assert(numChannels <= 4);
         // Scale
-        auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                {numChannels});
+        InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
+                                       InferenceEngine::Layout::C);
+        auto weights = InferenceEngine::make_shared_blob<float>(td);
         weights->allocate();
-        weights->set(std::vector<float>(numChannels, scaleFactors[0]));
+        float* weight_buf = weights->buffer().as<float*>();
+        std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);
         // Mean subtraction
-        auto biases = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                               {numChannels});
+        auto biases = InferenceEngine::make_shared_blob<float>(td);
         biases->allocate();
-        std::vector<float> biasesVec(numChannels);
+        float* bias_buf = biases->buffer().as<float*>();
         for (int i = 0; i < numChannels; ++i)
         {
-            biasesVec[i] = -means[0][i] * scaleFactors[0];
+            bias_buf[i] = -means[0][i] * scaleFactors[0];
         }
-        biases->set(biasesVec);
         InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
         addConstantData("weights", weights, ieLayer);
......@@ -1473,7 +1475,11 @@ struct Net::Impl
             for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
             {
                 InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                 dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
+#else
+                dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
+#endif
             }
         }
         else
......@@ -1481,7 +1487,11 @@ struct Net::Impl
             for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
             {
                 InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                 dataPtr->name = ld.name;
+#else
+                dataPtr->setName(ld.name);
+#endif
             }
         }
     }
......@@ -1502,7 +1512,11 @@ struct Net::Impl
             for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
             {
                 InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                 dataPtr->name = netInputLayer->outNames[i];
+#else
+                dataPtr->setName(netInputLayer->outNames[i]);
+#endif
             }
         }
         else
......@@ -1510,7 +1524,11 @@ struct Net::Impl
             for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
             {
                 InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                 dataPtr->name = ld.name;
+#else
+                dataPtr->setName(ld.name);
+#endif
             }
         }
         ieNode->net->addBlobs(ld.inputBlobsWrappers);
......
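The INF_ENGINE_VER_MAJOR_LE / _GT guards above (and below) pick the old or new call at compile time; 2019010000 encodes OpenVINO 2019 R1, for which op_inf_engine.hpp also provides the named constant INF_ENGINE_RELEASE_2019R1. Roughly how the comparison works, paraphrasing the helpers rather than quoting them:

    // INF_ENGINE_RELEASE is set by the build, e.g. 2019010000 (2019 R1) or 2019020000 (2019 R2).
    // Dividing by 10000 drops the patch digits, so whole releases are compared.
    #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) >  ((ver) / 10000))
    #define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))

    // So dataPtr->setName(...) is compiled only for 2019 R2 and newer,
    // while older releases keep assigning dataPtr->name directly.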
......@@ -111,7 +111,8 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(!input->dims.empty());
+        std::vector<size_t> dims = input->getDims();
+        CV_Assert(!dims.empty());
         InferenceEngine::Builder::Layer ieLayer(name);
         ieLayer.setName(name);
......@@ -122,12 +123,10 @@ public:
         else
         {
             ieLayer.setType("Split");
-            ieLayer.getParameters()["axis"] = input->dims.size() - 1;
-            ieLayer.getParameters()["out_sizes"] = input->dims[0];
+            ieLayer.getParameters()["axis"] = dims.size() - 1;
+            ieLayer.getParameters()["out_sizes"] = dims[0];
         }
-        std::vector<size_t> shape(input->dims);
-        std::reverse(shape.begin(), shape.end());
-        ieLayer.setInputPorts({InferenceEngine::Port(shape)});
+        ieLayer.setInputPorts({InferenceEngine::Port(dims)});
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
......
......@@ -304,7 +304,7 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axis, input->dims.size()));
+        ieLayer.setAxis(clamp(axis, input->getDims().size()));
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
......
......@@ -465,14 +465,13 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(input->dims.size() == 4 || input->dims.size() == 5);
-        const int inpCn = input->dims[input->dims.size() - 2]; // NOTE: input->dims are reversed (WHIO or WHDIO)
+        std::vector<size_t> dims = input->getDims();
+        CV_Assert(dims.size() == 4 || dims.size() == 5);
+        const int inpCn = dims[1];
         const int outCn = blobs[0].size[0];
         const int inpGroupCn = blobs[0].size[1];
         const int group = inpCn / inpGroupCn;
-        InferenceEngine::Layout layout = (input->dims.size() == 4) ? InferenceEngine::Layout::OIHW :
+        InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
                                                                      InferenceEngine::Layout::NCDHW;
         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
......@@ -485,9 +484,10 @@ public:
         }
         else
         {
-            ieWeights = InferenceEngine::make_shared_blob<float>(
-                            InferenceEngine::Precision::FP32, layout,
-                            ieWeights->dims());
+            ieWeights = InferenceEngine::make_shared_blob<float>({
+                            InferenceEngine::Precision::FP32,
+                            ieWeights->getTensorDesc().getDims(), layout
+                        });
             ieWeights->allocate();
             Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
......@@ -1877,9 +1877,10 @@ public:
         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
         if (fusedWeights)
         {
-            ieWeights = InferenceEngine::make_shared_blob<float>(
-                            InferenceEngine::Precision::FP32, layout,
-                            ieWeights->dims());
+            ieWeights = InferenceEngine::make_shared_blob<float>({
+                            InferenceEngine::Precision::FP32,
+                            ieWeights->getTensorDesc().getDims(), layout
+                        });
             ieWeights->allocate();
             int inpCn = blobs[0].size[0];
......
......@@ -261,7 +261,8 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        if (input->dims.size() == 4)
+        std::vector<size_t> dims = input->getDims();
+        if (dims.size() == 4)
         {
             InferenceEngine::Builder::NormalizeLayer ieLayer(name);
......@@ -270,13 +271,14 @@ public:
             ieLayer.setEpsilon(epsilon);
             InferenceEngine::Builder::Layer l = ieLayer;
-            const int numChannels = input->dims[2]; // NOTE: input->dims are reversed (whcn)
+            const int numChannels = dims[1];
             InferenceEngine::Blob::Ptr weights;
             if (blobs.empty())
             {
-                weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                   InferenceEngine::Layout::C,
-                                                                   {(size_t)numChannels});
+                weights = InferenceEngine::make_shared_blob<float>({
+                              InferenceEngine::Precision::FP32,
+                              {(size_t)numChannels}, InferenceEngine::Layout::C
+                          });
                 weights->allocate();
                 Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
......
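A detail that recurs in the layer changes above: the old input->dims field reported dimensions in reversed (minor-to-major) order, which is why the removed lines index channels as dims[2] or dims[size() - 2] and reverse vectors before building ports, whereas TensorDesc::getDims() reports them in the declared layout order. Illustratively, for an NCHW input of an assumed 1x3x224x224 shape:

    // pre-R2:  input->dims      == {224, 224, 3, 1}  // reversed, see the removed NOTE comments
    //          channels         == input->dims[2]
    // 2019 R2: input->getDims() == {1, 3, 224, 224}   // plain NCHW order
    //          channels         == dims[1]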
......@@ -166,9 +166,11 @@ public:
         if (kernel_size.size() == 3)
             return preferableTarget == DNN_TARGET_CPU;
         if (preferableTarget == DNN_TARGET_MYRIAD) {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
             if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2) ) {
                 return !isMyriadX();
             }
+#endif
             return type == MAX || type == AVE;
         }
         else
......
......@@ -207,12 +207,13 @@ public:
         }
         else
         {
-            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                    {numChannels});
+            auto weights = InferenceEngine::make_shared_blob<float>({
+                               InferenceEngine::Precision::FP32, {(size_t)numChannels},
+                               InferenceEngine::Layout::C
+                           });
             weights->allocate();
-            std::vector<float> ones(numChannels, 1);
-            weights->set(ones);
+            float* buf = weights->buffer().as<float*>();
+            std::fill(buf, buf + numChannels, 1);
             addConstantData("weights", weights, l);
         }
         if (hasBias)
......
......@@ -301,14 +301,14 @@ public:
         {
             std::vector<size_t> outShape(numDims);
             for (int i = 0; i < numDims; ++i)
-                outShape[numDims - 1 - i] = sliceRanges[0][i].size();
+                outShape[i] = sliceRanges[0][i].size();
             ieLayer.getInputPorts()[1].setParameter("type", "weights");
             // Fake blob which will be moved to inputs (as weights).
-            auto shapeSource = InferenceEngine::make_shared_blob<float>(
-                                   InferenceEngine::Precision::FP32,
-                                   InferenceEngine::Layout::ANY, outShape);
+            auto shapeSource = InferenceEngine::make_shared_blob<float>({
+                                   InferenceEngine::Precision::FP32, outShape,
+                                   InferenceEngine::Layout::ANY
+                               });
             shapeSource->allocate();
             addConstantData("weights", shapeSource, ieLayer);
         }
......
......@@ -315,7 +315,8 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axisRaw, input->dims.size()));
+        ieLayer.setAxis(clamp(axisRaw, input->getDims().size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
 #endif // HAVE_INF_ENGINE
......
......@@ -92,18 +92,22 @@ public:
     void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                  bool isAsync);
-    void initPlugin(InferenceEngine::ICNNNetwork& net);
+    void initPlugin(InferenceEngine::CNNNetwork& net);
     void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
 private:
     InferenceEngine::Builder::Network netBuilder;
-    InferenceEngine::InferenceEnginePluginPtr enginePtr;
-    InferenceEngine::InferencePlugin plugin;
     InferenceEngine::ExecutableNetwork netExec;
     InferenceEngine::BlobMap allBlobs;
-    InferenceEngine::TargetDevice targetDevice;
+    std::string device_name;
+#if INF_ENGINE_VER_MAJOR_LE(2019010000)
+    InferenceEngine::InferenceEnginePluginPtr enginePtr;
+    InferenceEngine::InferencePlugin plugin;
+#else
+    bool isInit = false;
+#endif
     struct InfEngineReqWrapper
     {
......
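The members guarded above correspond to the two ways an ExecutableNetwork is obtained: the legacy per-device plugin and the Core object introduced in 2019 R2. A condensed sketch of the two paths, assuming `net` is an already built InferenceEngine::CNNNetwork; the real initPlugin additionally registers CPU extensions and per-device config:

    InferenceEngine::ExecutableNetwork netExec;
    std::string device_name = "CPU";  // e.g. "CPU", "GPU", "MYRIAD", "FPGA"
    #if INF_ENGINE_VER_MAJOR_LE(2019010000)
        // Old path: ask the dispatcher for a device-specific plugin and load through it.
        InferenceEngine::InferenceEnginePluginPtr enginePtr =
            InferenceEngine::PluginDispatcher({""}).getPluginByDevice(device_name);
        InferenceEngine::InferencePlugin plugin(enginePtr);
        netExec = plugin.LoadNetwork(net, {});
    #else
        // New path: a single Core instance serves every device by name.
        InferenceEngine::Core ie;
        netExec = ie.LoadNetwork(net, device_name);
    #endif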
......@@ -136,13 +136,10 @@ static const std::vector<std::string> getOpenVINOTestModelsList()
 static inline void genData(const std::vector<size_t>& dims, Mat& m, Blob::Ptr& dataPtr)
 {
-    std::vector<int> reversedDims(dims.begin(), dims.end());
-    std::reverse(reversedDims.begin(), reversedDims.end());
-    m.create(reversedDims, CV_32F);
+    m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
     randu(m, -1, 1);
-    dataPtr = make_shared_blob<float>(Precision::FP32, dims, (float*)m.data);
+    dataPtr = make_shared_blob<float>({Precision::FP32, dims, Layout::ANY}, (float*)m.data);
 }
 void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
......@@ -154,32 +151,42 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     CNNNetwork net = reader.getNetwork();
+    std::string device_name;
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+    Core ie;
+#else
     InferenceEnginePluginPtr enginePtr;
     InferencePlugin plugin;
+#endif
     ExecutableNetwork netExec;
     InferRequest infRequest;
     try
     {
-        auto dispatcher = InferenceEngine::PluginDispatcher({""});
         switch (target)
         {
             case DNN_TARGET_CPU:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eCPU);
+                device_name = "CPU";
                 break;
             case DNN_TARGET_OPENCL:
             case DNN_TARGET_OPENCL_FP16:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eGPU);
+                device_name = "GPU";
                 break;
             case DNN_TARGET_MYRIAD:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eMYRIAD);
+                device_name = "MYRIAD";
                 break;
             case DNN_TARGET_FPGA:
-                enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
+                device_name = "FPGA";
                 break;
             default:
                 CV_Error(Error::StsNotImplemented, "Unknown target");
         };
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
+        auto dispatcher = InferenceEngine::PluginDispatcher({""});
+        enginePtr = dispatcher.getPluginByDevice(device_name);
+#endif
         if (target == DNN_TARGET_CPU || target == DNN_TARGET_FPGA)
         {
             std::string suffixes[] = {"_avx2", "_sse4", ""};
......@@ -202,16 +209,23 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
                 try
                 {
                     IExtensionPtr extension = make_so_pointer<IExtension>(libName);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+                    ie.AddExtension(extension, device_name);
+#else
                     enginePtr->AddExtension(extension, 0);
+#endif
                     break;
                 }
                 catch(...) {}
             }
             // Some of networks can work without a library of extra layers.
         }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+        netExec = ie.LoadNetwork(net, device_name);
+#else
         plugin = InferencePlugin(enginePtr);
         netExec = plugin.LoadNetwork(net, {});
+#endif
         infRequest = netExec.CreateInferRequest();
     }
     catch (const std::exception& ex)
......@@ -224,7 +238,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     BlobMap inputBlobs;
     for (auto& it : net.getInputsInfo())
     {
-        genData(it.second->getDims(), inputsMap[it.first], inputBlobs[it.first]);
+        genData(it.second->getTensorDesc().getDims(), inputsMap[it.first], inputBlobs[it.first]);
     }
     infRequest.SetInput(inputBlobs);
......@@ -233,7 +247,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     BlobMap outputBlobs;
     for (auto& it : net.getOutputsInfo())
     {
-        genData(it.second->dims, outputsMap[it.first], outputBlobs[it.first]);
+        genData(it.second->getTensorDesc().getDims(), outputsMap[it.first], outputBlobs[it.first]);
     }
     infRequest.SetOutput(outputBlobs);
......
......@@ -467,6 +467,42 @@ INSTANTIATE_TEST_CASE_P(/**/, Async, Combine(
         Values(CV_32F, CV_8U),
         testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
 ));
+typedef testing::TestWithParam<Target> Test_Model_Optimizer;
+TEST_P(Test_Model_Optimizer, forward_two_nets)
+{
+    const int target = GetParam();
+    const std::string suffix = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    const std::string& model = findDataFile("dnn/layers/layer_convolution" + suffix + ".bin");
+    const std::string& proto = findDataFile("dnn/layers/layer_convolution" + suffix + ".xml");
+    Net net0 = readNet(model, proto);
+    net0.setPreferableTarget(target);
+    Net net1 = readNet(model, proto);
+    net1.setPreferableTarget(target);
+    // Generate inputs.
+    int blobSize[] = {2, 6, 75, 113};
+    Mat input(4, &blobSize[0], CV_32F);
+    randu(input, 0, 255);
+    net0.setInput(input);
+    Mat ref0 = net0.forward().clone();
+    net1.setInput(input);
+    Mat ref1 = net1.forward();
+    net0.setInput(input);
+    Mat ref2 = net0.forward();
+    normAssert(ref0, ref2, 0, 0);
+}
+INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
+    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+);
 #endif // HAVE_INF_ENGINE
 }} // namespace