Commit 0e1ef8f8 authored by Lubov Batanina, committed by Alexander Alekhin

Merge pull request #15184 from l-bat:IE_R2

Support new IE API (#15184)

* Add OpenVINO R2 support for layers

* Add Core API

* Fix tests

* Fix expectNoFallbacksFromIE for ONNX nets

* Remove deprecated API

* Remove td

* Remove TargetDevice

* Fix Async

* Add test

* Fix detectMyriadX

* Fix test

* Fix warning
parent cf93a05d
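
The central change across the hunks below is twofold: blob creation moves to the TensorDesc-based constructors, and device handling moves from the R1 plugin machinery (PluginDispatcher, InferencePlugin, TargetDevice) to the unified Core API introduced with OpenVINO 2019 R2. A hedged sketch of the device-handling shift (loadOnDevice is a hypothetical helper, not code from this PR):

    #include <inference_engine.hpp>

    InferenceEngine::ExecutableNetwork loadOnDevice(InferenceEngine::CNNNetwork& net)
    {
        // R1 and older: resolve a plugin per device, then load through it.
        //   auto dispatcher = InferenceEngine::PluginDispatcher({""});
        //   auto enginePtr  = dispatcher.getPluginByDevice("CPU");
        //   auto plugin     = InferenceEngine::InferencePlugin(enginePtr);
        //   return plugin.LoadNetwork(net, {});

        // R2 and newer: a single Core object addresses every device by name.
        InferenceEngine::Core ie;
        return ie.LoadNetwork(net, "CPU");
    }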
@@ -713,21 +713,23 @@ struct DataLayer : public Layer
         CV_Assert(numChannels <= 4);

         // Scale
-        auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                {numChannels});
+        InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
+                                       InferenceEngine::Layout::C);
+        auto weights = InferenceEngine::make_shared_blob<float>(td);
         weights->allocate();
-        weights->set(std::vector<float>(numChannels, scaleFactors[0]));
+        float* weight_buf = weights->buffer().as<float*>();
+        std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);

         // Mean subtraction
-        auto biases = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                               {numChannels});
+        auto biases = InferenceEngine::make_shared_blob<float>(td);
         biases->allocate();
-        std::vector<float> biasesVec(numChannels);
+        float* bias_buf = biases->buffer().as<float*>();
         for (int i = 0; i < numChannels; ++i)
         {
-            biasesVec[i] = -means[0][i] * scaleFactors[0];
+            bias_buf[i] = -means[0][i] * scaleFactors[0];
         }
-        biases->set(biasesVec);

         InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
         addConstantData("weights", weights, ieLayer);
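
For reference, a minimal standalone sketch of the R2 blob pattern used above (names and sizes are illustrative, not from the PR):

    #include <inference_engine.hpp>
    #include <algorithm>

    void fillScaleBlob()
    {
        namespace IE = InferenceEngine;
        const size_t numChannels = 3;

        // One TensorDesc now carries precision, dims and layout.
        IE::TensorDesc td(IE::Precision::FP32, {numChannels}, IE::Layout::C);
        IE::Blob::Ptr weights = IE::make_shared_blob<float>(td);
        weights->allocate();

        // TBlob::set() is gone in R2; write through the mapped buffer instead.
        float* buf = weights->buffer().as<float*>();
        std::fill(buf, buf + numChannels, 1.0f);
    }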
@@ -1473,7 +1475,11 @@ struct Net::Impl
                 for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
                 {
                     InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                     dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
+#else
+                    dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
+#endif
                 }
             }
             else
@@ -1481,7 +1487,11 @@ struct Net::Impl
                 for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
                 {
                     InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                     dataPtr->name = ld.name;
+#else
+                    dataPtr->setName(ld.name);
+#endif
                 }
             }
         }
@@ -1502,7 +1512,11 @@ struct Net::Impl
                 for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
                 {
                     InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                     dataPtr->name = netInputLayer->outNames[i];
+#else
+                    dataPtr->setName(netInputLayer->outNames[i]);
+#endif
                 }
             }
             else
@@ -1510,7 +1524,11 @@ struct Net::Impl
                 for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
                 {
                     InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                     dataPtr->name = ld.name;
+#else
+                    dataPtr->setName(ld.name);
+#endif
                 }
             }
             ieNode->net->addBlobs(ld.inputBlobsWrappers);
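
In these gates, 2019010000 encodes OpenVINO 2019 R1 under the INF_ENGINE_RELEASE numbering, so the #else branches target R2 and newer, where Data::name is no longer written directly and the setName() accessor is used instead.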
@@ -111,7 +111,8 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(!input->dims.empty());
+        std::vector<size_t> dims = input->getDims();
+        CV_Assert(!dims.empty());

         InferenceEngine::Builder::Layer ieLayer(name);
         ieLayer.setName(name);
@@ -122,12 +123,10 @@ public:
         else
         {
             ieLayer.setType("Split");
-            ieLayer.getParameters()["axis"] = input->dims.size() - 1;
-            ieLayer.getParameters()["out_sizes"] = input->dims[0];
+            ieLayer.getParameters()["axis"] = dims.size() - 1;
+            ieLayer.getParameters()["out_sizes"] = dims[0];
         }
-        std::vector<size_t> shape(input->dims);
-        std::reverse(shape.begin(), shape.end());
-        ieLayer.setInputPorts({InferenceEngine::Port(shape)});
+        ieLayer.setInputPorts({InferenceEngine::Port(dims)});
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
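
A recurring simplification in these layer hunks: the R2 getDims() accessor reports the shape in straight (N, C, H, W) order, so the manual std::reverse and the old "dims are reversed" workarounds can be dropped.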
@@ -304,7 +304,7 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);

         InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axis, input->dims.size()));
+        ieLayer.setAxis(clamp(axis, input->getDims().size()));
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
@@ -465,15 +465,14 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(input->dims.size() == 4 || input->dims.size() == 5);
+        std::vector<size_t> dims = input->getDims();
+        CV_Assert(dims.size() == 4 || dims.size() == 5);

-        const int inpCn = input->dims[input->dims.size() - 2];  // NOTE: input->dims are reversed (WHIO or WHDIO)
+        const int inpCn = dims[1];
         const int outCn = blobs[0].size[0];
         const int inpGroupCn = blobs[0].size[1];
         const int group = inpCn / inpGroupCn;
-
-        InferenceEngine::Layout layout = (input->dims.size() == 4) ? InferenceEngine::Layout::OIHW :
-                                                                     InferenceEngine::Layout::NCDHW;
+        InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
+                                                              InferenceEngine::Layout::NCDHW;

         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
         if (fusedWeights)
@@ -485,9 +484,10 @@ public:
         }
         else
         {
-            ieWeights = InferenceEngine::make_shared_blob<float>(
-                                InferenceEngine::Precision::FP32, layout,
-                                ieWeights->dims());
+            ieWeights = InferenceEngine::make_shared_blob<float>({
+                                InferenceEngine::Precision::FP32,
+                                ieWeights->getTensorDesc().getDims(), layout
+                            });
             ieWeights->allocate();

             Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
@@ -1877,9 +1877,10 @@ public:
         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
         if (fusedWeights)
         {
-            ieWeights = InferenceEngine::make_shared_blob<float>(
-                                InferenceEngine::Precision::FP32, layout,
-                                ieWeights->dims());
+            ieWeights = InferenceEngine::make_shared_blob<float>({
+                                InferenceEngine::Precision::FP32,
+                                ieWeights->getTensorDesc().getDims(), layout
+                            });
             ieWeights->allocate();

             int inpCn = blobs[0].size[0];
@@ -261,7 +261,8 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        if (input->dims.size() == 4)
+        std::vector<size_t> dims = input->getDims();
+        if (dims.size() == 4)
         {
             InferenceEngine::Builder::NormalizeLayer ieLayer(name);
@@ -270,13 +271,14 @@ public:
             ieLayer.setEpsilon(epsilon);

             InferenceEngine::Builder::Layer l = ieLayer;
-            const int numChannels = input->dims[2];  // NOTE: input->dims are reversed (whcn)
+            const int numChannels = dims[1];
             InferenceEngine::Blob::Ptr weights;
             if (blobs.empty())
             {
-                weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                   InferenceEngine::Layout::C,
-                                                                   {(size_t)numChannels});
+                weights = InferenceEngine::make_shared_blob<float>({
+                              InferenceEngine::Precision::FP32,
+                              {(size_t)numChannels}, InferenceEngine::Layout::C
+                          });
                 weights->allocate();

                 Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
@@ -166,9 +166,11 @@ public:
         if (kernel_size.size() == 3)
             return preferableTarget == DNN_TARGET_CPU;
         if (preferableTarget == DNN_TARGET_MYRIAD) {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
             if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2)) {
                 return !isMyriadX();
             }
+#endif
             return type == MAX || type == AVE;
         }
         else
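
This pooling configuration (2x2-strided MAX pooling with 1-pixel padding) was rejected on MyriadX under R1 and older; the new guard compiles the restriction out for R2, where the case is expected to work.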
@@ -207,12 +207,13 @@ public:
         }
         else
         {
-            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                    {numChannels});
+            auto weights = InferenceEngine::make_shared_blob<float>({
+                               InferenceEngine::Precision::FP32, {(size_t)numChannels},
+                               InferenceEngine::Layout::C
+                           });
             weights->allocate();
-
-            std::vector<float> ones(numChannels, 1);
-            weights->set(ones);
+            float* buf = weights->buffer().as<float*>();
+            std::fill(buf, buf + numChannels, 1);
             addConstantData("weights", weights, l);
         }
         if (hasBias)
@@ -301,14 +301,14 @@ public:
         {
             std::vector<size_t> outShape(numDims);
             for (int i = 0; i < numDims; ++i)
-                outShape[numDims - 1 - i] = sliceRanges[0][i].size();
+                outShape[i] = sliceRanges[0][i].size();

             ieLayer.getInputPorts()[1].setParameter("type", "weights");

-            // Fake blob which will be moved to inputs (as weights).
-            auto shapeSource = InferenceEngine::make_shared_blob<float>(
-                                   InferenceEngine::Precision::FP32,
-                                   InferenceEngine::Layout::ANY, outShape);
+            auto shapeSource = InferenceEngine::make_shared_blob<float>({
+                                   InferenceEngine::Precision::FP32, outShape,
+                                   InferenceEngine::Layout::ANY
+                               });
             shapeSource->allocate();
             addConstantData("weights", shapeSource, ieLayer);
         }
@@ -315,7 +315,8 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);

         InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axisRaw, input->dims.size()));
+        ieLayer.setAxis(clamp(axisRaw, input->getDims().size()));

         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
 #endif  // HAVE_INF_ENGINE
(This diff is collapsed and not shown.)
@@ -92,18 +92,22 @@ public:
     void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                  bool isAsync);

-    void initPlugin(InferenceEngine::ICNNNetwork& net);
+    void initPlugin(InferenceEngine::CNNNetwork& net);

     void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);

 private:
     InferenceEngine::Builder::Network netBuilder;

-    InferenceEngine::InferenceEnginePluginPtr enginePtr;
-    InferenceEngine::InferencePlugin plugin;
     InferenceEngine::ExecutableNetwork netExec;
     InferenceEngine::BlobMap allBlobs;
-    InferenceEngine::TargetDevice targetDevice;
+    std::string device_name;
+#if INF_ENGINE_VER_MAJOR_LE(2019010000)
+    InferenceEngine::InferenceEnginePluginPtr enginePtr;
+    InferenceEngine::InferencePlugin plugin;
+#else
+    bool isInit = false;
+#endif

     struct InfEngineReqWrapper
     {
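A rough sketch (not the actual OpenCV implementation) of what the R2 branch of initPlugin can look like with these members, assuming one Core instance is shared per process:

    void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
    {
        CV_Assert(!isInit);
        static InferenceEngine::Core ie;  // assumption: shared across calls
        netExec = ie.LoadNetwork(net, device_name);
        isInit = true;
    }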
@@ -136,13 +136,10 @@ static const std::vector<std::string> getOpenVINOTestModelsList()
 static inline void genData(const std::vector<size_t>& dims, Mat& m, Blob::Ptr& dataPtr)
 {
-    std::vector<int> reversedDims(dims.begin(), dims.end());
-    std::reverse(reversedDims.begin(), reversedDims.end());
-
-    m.create(reversedDims, CV_32F);
+    m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
     randu(m, -1, 1);

-    dataPtr = make_shared_blob<float>(Precision::FP32, dims, (float*)m.data);
+    dataPtr = make_shared_blob<float>({Precision::FP32, dims, Layout::ANY}, (float*)m.data);
 }

 void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
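
Call sites are unchanged; for example (arbitrary dims, test-file context):

    Mat m;
    Blob::Ptr blob;
    genData({1, 3, 224, 224}, m, blob);  // blob wraps m's buffer, both in NCHW order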
@@ -154,32 +151,42 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     CNNNetwork net = reader.getNetwork();

+    std::string device_name;
+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+    Core ie;
+#else
     InferenceEnginePluginPtr enginePtr;
     InferencePlugin plugin;
+#endif
     ExecutableNetwork netExec;
     InferRequest infRequest;

     try
     {
-        auto dispatcher = InferenceEngine::PluginDispatcher({""});
         switch (target)
         {
             case DNN_TARGET_CPU:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eCPU);
+                device_name = "CPU";
                 break;
             case DNN_TARGET_OPENCL:
             case DNN_TARGET_OPENCL_FP16:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eGPU);
+                device_name = "GPU";
                 break;
             case DNN_TARGET_MYRIAD:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eMYRIAD);
+                device_name = "MYRIAD";
                 break;
             case DNN_TARGET_FPGA:
-                enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
+                device_name = "FPGA";
                 break;
             default:
                 CV_Error(Error::StsNotImplemented, "Unknown target");
         };
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
+        auto dispatcher = InferenceEngine::PluginDispatcher({""});
+        enginePtr = dispatcher.getPluginByDevice(device_name);
+#endif

         if (target == DNN_TARGET_CPU || target == DNN_TARGET_FPGA)
         {
             std::string suffixes[] = {"_avx2", "_sse4", ""};
@@ -202,16 +209,23 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
             try
             {
                 IExtensionPtr extension = make_so_pointer<IExtension>(libName);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+                ie.AddExtension(extension, device_name);
+#else
                 enginePtr->AddExtension(extension, 0);
+#endif
                 break;
             }
             catch(...) {}
         }
         // Some of networks can work without a library of extra layers.
     }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+        netExec = ie.LoadNetwork(net, device_name);
+#else
         plugin = InferencePlugin(enginePtr);
         netExec = plugin.LoadNetwork(net, {});
+#endif
         infRequest = netExec.CreateInferRequest();
     }
     catch (const std::exception& ex)
@@ -224,7 +238,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     BlobMap inputBlobs;
     for (auto& it : net.getInputsInfo())
     {
-        genData(it.second->getDims(), inputsMap[it.first], inputBlobs[it.first]);
+        genData(it.second->getTensorDesc().getDims(), inputsMap[it.first], inputBlobs[it.first]);
     }
     infRequest.SetInput(inputBlobs);
@@ -233,7 +247,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     BlobMap outputBlobs;
     for (auto& it : net.getOutputsInfo())
     {
-        genData(it.second->dims, outputsMap[it.first], outputBlobs[it.first]);
+        genData(it.second->getTensorDesc().getDims(), outputsMap[it.first], outputBlobs[it.first]);
     }
     infRequest.SetOutput(outputBlobs);
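
Condensed, the R2 path of this helper reduces to the following (a hedged sketch; paths and device name are placeholders):

    CNNNetReader reader;
    reader.ReadNetwork("model.xml");
    reader.ReadWeights("model.bin");
    CNNNetwork net = reader.getNetwork();

    Core ie;
    ExecutableNetwork netExec = ie.LoadNetwork(net, "CPU");
    InferRequest infRequest = netExec.CreateInferRequest();
    // SetInput()/SetOutput() with pre-allocated blobs, then:
    infRequest.Infer();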
@@ -467,6 +467,42 @@ INSTANTIATE_TEST_CASE_P(/**/, Async, Combine(
     Values(CV_32F, CV_8U),
     testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
 ));

+typedef testing::TestWithParam<Target> Test_Model_Optimizer;
+TEST_P(Test_Model_Optimizer, forward_two_nets)
+{
+    const int target = GetParam();
+
+    const std::string suffix = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    const std::string& model = findDataFile("dnn/layers/layer_convolution" + suffix + ".bin");
+    const std::string& proto = findDataFile("dnn/layers/layer_convolution" + suffix + ".xml");
+
+    Net net0 = readNet(model, proto);
+    net0.setPreferableTarget(target);
+
+    Net net1 = readNet(model, proto);
+    net1.setPreferableTarget(target);
+
+    // Generate inputs.
+    int blobSize[] = {2, 6, 75, 113};
+    Mat input(4, &blobSize[0], CV_32F);
+    randu(input, 0, 255);
+
+    net0.setInput(input);
+    Mat ref0 = net0.forward().clone();
+
+    net1.setInput(input);
+    Mat ref1 = net1.forward();
+
+    net0.setInput(input);
+    Mat ref2 = net0.forward();
+
+    normAssert(ref0, ref2, 0, 0);
+}
+INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
+    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+);

 #endif  // HAVE_INF_ENGINE

 }}  // namespace