Commit ba703157 authored by Dmitry Kurtaev, committed by Alexander Alekhin

Merge pull request #15063 from dkurt:dnn_ie_ocv_layers

* Wrap layers unsupported by IE as custom layers

* Replace pointers to layers' blobs with their shapes

* Enable Faster R-CNN with IE backend on CPU

parent 7e46766c
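As a user-facing illustration of what the merge enables (a minimal sketch, assuming placeholder model and image paths; this program is not part of the commit): a TensorFlow Faster R-CNN model can now stay on the Inference Engine backend even though its graph contains layers IE cannot execute, because those layers fall back to OpenCV inside the IE network.

    #include <opencv2/dnn.hpp>
    #include <opencv2/imgcodecs.hpp>
    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        // Placeholder paths; any TF Faster R-CNN pair supported by OpenCV works.
        Net net = readNetFromTensorflow("frozen_inference_graph.pb",
                                        "faster_rcnn.pbtxt");
        net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(DNN_TARGET_CPU);  // enabled for Faster R-CNN by this PR

        Mat img = imread("image.jpg");  // placeholder input
        net.setInput(blobFromImage(img, 1.0, Size(800, 600), Scalar(), /*swapRB=*/true, false));
        Mat out = net.forward();  // IE-unsupported layers run through the OpenCV fallback
        return 0;
    }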
@@ -1556,11 +1556,37 @@ struct Net::Impl
         Ptr<Layer> layer = ld.layerInstance;
         if (!fused && !layer->supportBackend(preferableBackend))
         {
-            addInfEngineNetOutputs(ld);
-            net = Ptr<InfEngineBackendNet>();
-            netBlobsWrappers.clear();  // Is not used for R5 release but we don't wrap it to #ifdef.
-            layer->preferableTarget = DNN_TARGET_CPU;
-            continue;
+            bool customizable = ld.id != 0 && ld.outputBlobs.size() == 1;
+            // TODO: there is a bug in Myriad plugin with custom layers shape infer.
+            if (preferableTarget == DNN_TARGET_MYRIAD)
+            {
+                for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
+                {
+                    customizable = ld.inputBlobs[i]->size[0] == 1;
+                }
+            }
+
+            // TODO: fix these workarounds
+            if (preferableTarget == DNN_TARGET_MYRIAD ||
+                preferableTarget == DNN_TARGET_OPENCL ||
+                preferableTarget == DNN_TARGET_OPENCL_FP16)
+                customizable &= ld.type != "Concat";
+
+            if (preferableTarget == DNN_TARGET_OPENCL ||
+                preferableTarget == DNN_TARGET_OPENCL_FP16)
+                customizable &= ld.type != "Power";
+
+            if (preferableTarget == DNN_TARGET_OPENCL)
+                customizable &= ld.type != "Eltwise";
+
+            if (!customizable)
+            {
+                addInfEngineNetOutputs(ld);
+                net = Ptr<InfEngineBackendNet>();
+                netBlobsWrappers.clear();  // Is not used for R5 release but we don't wrap it to #ifdef.
+                layer->preferableTarget = DNN_TARGET_CPU;
+                continue;
+            }
         }
         ld.skip = true;  // Initially skip all Inference Engine supported layers.
@@ -1599,7 +1625,13 @@ struct Net::Impl
             if (!fused)
             {
-                node = layer->initInfEngine(ld.inputBlobsWrappers);
+                if (layer->supportBackend(preferableBackend))
+                    node = layer->initInfEngine(ld.inputBlobsWrappers);
+                else
+                {
+                    node = Ptr<BackendNode>(new InfEngineBackendNode(
+                        ld.layerInstance, ld.inputBlobs, ld.outputBlobs, ld.internals));
+                }
             }
             else if (node.empty())
                 continue;
...
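To make the wrapping concrete, here is a hedged sketch (the class and its arithmetic are invented for illustration; only the Layer API is real) of the kind of layer this path now keeps inside an IE graph: it advertises support for DNN_BACKEND_OPENCV only, so before this change it forced the whole network back to the OpenCV backend.

    // Hypothetical element-wise layer; supports the OpenCV backend only.
    class DoubleLayer CV_FINAL : public cv::dnn::Layer
    {
    public:
        DoubleLayer(const cv::dnn::LayerParams& params) { setParamsFrom(params); }

        static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
        { return cv::Ptr<cv::dnn::Layer>(new DoubleLayer(params)); }

        virtual bool supportBackend(int backendId) CV_OVERRIDE
        { return backendId == cv::dnn::DNN_BACKEND_OPENCV; }

        virtual bool getMemoryShapes(const std::vector<cv::dnn::MatShape>& inputs,
                                     const int, std::vector<cv::dnn::MatShape>& outputs,
                                     std::vector<cv::dnn::MatShape>&) const CV_OVERRIDE
        { outputs = inputs; return false; }  // output shape mirrors the input shape

        virtual void forward(cv::InputArrayOfArrays inputs_arr,
                             cv::OutputArrayOfArrays outputs_arr,
                             cv::OutputArrayOfArrays) CV_OVERRIDE
        {
            std::vector<cv::Mat> inputs, outputs;
            inputs_arr.getMatVector(inputs);
            outputs_arr.getMatVector(outputs);
            inputs[0].convertTo(outputs[0], -1, 2.0);  // toy computation: y = 2 * x
        }
    };
    // Registered as usual, e.g. CV_DNN_REGISTER_LAYER_CLASS(Double, DoubleLayer);

Such a layer passes the `customizable` checks above (non-input layer, single output, not one of the listed workaround types) and is wrapped as an IE custom layer instead of disabling the IE backend for the whole model.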
@@ -6,6 +6,7 @@
 // Third party copyrights are property of their respective owners.

 #include "../precomp.hpp"
+#include "../op_inf_engine.hpp"
 #include "layers_common.hpp"

 #ifdef HAVE_OPENCL
@@ -23,6 +24,11 @@ public:
         CV_Assert(blobs.size() == 1);
     }

+    virtual bool supportBackend(int backendId) CV_OVERRIDE
+    {
+        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE;
+    }
+
     virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                  const int requiredOutputs,
                                  std::vector<MatShape> &outputs,
@@ -58,6 +64,15 @@ public:
         outputs_arr.getMatVector(outputs);
         blobs[0].copyTo(outputs[0]);
     }
+
+#ifdef HAVE_INF_ENGINE
+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+        InferenceEngine::Builder::ConstLayer ieLayer(name);
+        ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+    }
+#endif  // HAVE_INF_ENGINE
 };

 Ptr<Layer> ConstLayer::create(const LayerParams& params)
...
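The IE export added to ConstLayer follows the usual three-step pattern for IE-enabled layers; annotated here for reference (only calls that appear in the diff itself are used):

    #ifdef HAVE_INF_ENGINE
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
        // 1. Create an IE Builder layer carrying this node's name.
        InferenceEngine::Builder::ConstLayer ieLayer(name);
        // 2. Attach the constant cv::Mat as an Inference Engine blob.
        ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
        // 3. Wrap the Builder layer into a backend node for the DNN graph.
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
    }
    #endif  // HAVE_INF_ENGINE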
@@ -14,6 +14,7 @@ class CropAndResizeLayerImpl CV_FINAL : public CropAndResizeLayer
 public:
     CropAndResizeLayerImpl(const LayerParams& params)
     {
+        setParamsFrom(params);
         CV_Assert_N(params.has("width"), params.has("height"));
         outWidth = params.get<float>("width");
         outHeight = params.get<float>("height");
...
@@ -927,7 +927,7 @@ public:
         ieLayer.setShareLocation(_shareLocation);
         ieLayer.setBackgroudLabelId(_backgroundLabelId);
         ieLayer.setNMSThreshold(_nmsThreshold);
-        ieLayer.setTopK(_topK);
+        ieLayer.setTopK(_topK > 0 ? _topK : _keepTopK);
         ieLayer.setKeepTopK(_keepTopK);
         ieLayer.setConfidenceThreshold(_confidenceThreshold);
         ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget);
...
@@ -137,12 +137,17 @@ class InfEngineBackendNode : public BackendNode
 public:
     InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);

+    InfEngineBackendNode(Ptr<Layer>& layer, std::vector<Mat*>& inputs,
+                         std::vector<Mat>& outputs, std::vector<Mat>& internals);
+
     void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                  std::vector<Ptr<BackendWrapper> >& outputs);

     // Inference Engine network object that allows to obtain the outputs of this layer.
     InferenceEngine::Builder::Layer layer;
     Ptr<InfEngineBackendNet> net;
+    // CPU fallback in case of unsupported Inference Engine layer.
+    Ptr<dnn::Layer> cvLayer;
 };

 class InfEngineBackendWrapper : public BackendWrapper
@@ -173,6 +178,9 @@ InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
 Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

+void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
+                          std::vector<Mat>& mats);
+
 // Convert Inference Engine blob with FP32 precision to FP16 precision.
 // Allocates memory for a new blob.
 InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
...
@@ -53,17 +53,6 @@ static std::string _tf(TString filename)
     return (getOpenCVExtraDir() + "/dnn/") + filename;
 }

-static std::vector<String> getOutputsNames(const Net& net)
-{
-    std::vector<String> names;
-    std::vector<int> outLayers = net.getUnconnectedOutLayers();
-    std::vector<String> layersNames = net.getLayerNames();
-    names.resize(outLayers.size());
-    for (size_t i = 0; i < outLayers.size(); ++i)
-        names[i] = layersNames[outLayers[i] - 1];
-    return names;
-}
-
 TEST(Test_Darknet, read_tiny_yolo_voc)
 {
     Net net = readNetFromDarknet(_tf("tiny-yolo-voc.cfg"));
@@ -159,7 +148,7 @@ public:
         net.setPreferableTarget(target);
         net.setInput(inp);
         std::vector<Mat> outs;
-        net.forward(outs, getOutputsNames(net));
+        net.forward(outs, net.getUnconnectedOutLayersNames());

         for (int b = 0; b < batch_size; ++b)
         {
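For context, the replacement call is public OpenCV API; a minimal usage sketch, with `net` and `blob` assumed to be set up as in the test:

    std::vector<cv::Mat> outs;
    net.setInput(blob);
    // Returns the names of all unconnected (output) layers directly,
    // replacing the hand-rolled getOutputsNames() helper deleted above.
    net.forward(outs, net.getUnconnectedOutLayersNames());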
@@ -339,6 +328,62 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
     }
 }

+#ifdef HAVE_INF_ENGINE
+static const std::chrono::milliseconds async_timeout(500);
+
+typedef testing::TestWithParam<tuple<std::string, Target> > Test_Darknet_nets_async;
+TEST_P(Test_Darknet_nets_async, Accuracy)
+{
+    applyTestTag(CV_TEST_TAG_MEMORY_512MB);
+
+    std::string prefix = get<0>(GetParam());
+    int target = get<1>(GetParam());
+
+    const int numInputs = 2;
+    std::vector<Mat> inputs(numInputs);
+    int blobSize[] = {1, 3, 416, 416};
+    for (int i = 0; i < numInputs; ++i)
+    {
+        inputs[i].create(4, &blobSize[0], CV_32F);
+        randu(inputs[i], 0, 1);
+    }
+
+    Net netSync = readNet(findDataFile("dnn/" + prefix + ".cfg"),
+                          findDataFile("dnn/" + prefix + ".weights", false));
+    netSync.setPreferableTarget(target);
+
+    // Run synchronously.
+    std::vector<Mat> refs(numInputs);
+    for (int i = 0; i < numInputs; ++i)
+    {
+        netSync.setInput(inputs[i]);
+        refs[i] = netSync.forward().clone();
+    }
+
+    Net netAsync = readNet(findDataFile("dnn/" + prefix + ".cfg"),
+                           findDataFile("dnn/" + prefix + ".weights", false));
+    netAsync.setPreferableTarget(target);
+
+    // Run asynchronously. To make test more robust, process inputs in the reversed order.
+    for (int i = numInputs - 1; i >= 0; --i)
+    {
+        netAsync.setInput(inputs[i]);
+
+        AsyncArray out = netAsync.forwardAsync();
+        ASSERT_TRUE(out.valid());
+        Mat result;
+        EXPECT_TRUE(out.get(result, async_timeout));
+
+        normAssert(refs[i], result, format("Index: %d", i).c_str(), 0, 0);
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets_async, Combine(
+    Values("yolo-voc", "tiny-yolo-voc", "yolov3"),
+    ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+));
+
+#endif
+
 TEST_P(Test_Darknet_nets, YOLOv3)
 {
     applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB));
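Outside the test harness, the asynchronous API exercised above boils down to a few calls (a minimal sketch, assuming placeholder model paths and a prepared input blob; forwardAsync currently requires the Inference Engine backend):

    Net net = readNet("model.cfg", "model.weights");  // placeholder paths
    net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
    net.setInput(blob);
    AsyncArray futureOut = net.forwardAsync();  // returns immediately
    Mat result;
    if (futureOut.get(result, std::chrono::milliseconds(500)))  // wait up to 500 ms
    {
        // 'result' now matches what a synchronous net.forward() would produce
    }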
@@ -376,6 +421,16 @@ TEST_P(Test_Darknet_nets, YOLOv3)
     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);  // Test with 'batch size 2' is disabled for DLIE/OpenCL target
 #endif

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    {
+        if (target == DNN_TARGET_OPENCL)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+        if (target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+    }
+#endif
+
     {
         SCOPED_TRACE("batch size 2");
         testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff);
...
@@ -554,6 +554,11 @@ TEST_P(ReLU, Accuracy)
     Backend backendId = get<0>(get<1>(GetParam()));
     Target targetId = get<1>(get<1>(GetParam()));

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD && negativeSlope < 0)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+#endif
+
     LayerParams lp;
     lp.set("negative_slope", negativeSlope);
     lp.type = "ReLU";
...
@@ -1112,7 +1112,7 @@ INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs, Combine(
 class UnsupportedLayer : public Layer
 {
 public:
-    UnsupportedLayer(const LayerParams &params) {}
+    UnsupportedLayer(const LayerParams &params) : Layer(params) {}

     static Ptr<Layer> create(const LayerParams& params)
     {
...
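The one-line fix matters because the cv::dnn::Layer base class is what stores the layer's name, type, and blobs from LayerParams; without forwarding them, the test layer ends up anonymous:

    // Before: base class default-constructed, params dropped.
    // UnsupportedLayer(const LayerParams &params) {}
    // After: name/type/blobs from params reach cv::dnn::Layer.
    UnsupportedLayer(const LayerParams &params) : Layer(params) {}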
@@ -145,8 +145,17 @@ TEST_P(Test_TensorFlow_layers, padding)
 {
     runTensorFlowNet("padding_valid");
     runTensorFlowNet("spatial_padding");
-    runTensorFlowNet("keras_pad_concat");
     runTensorFlowNet("mirror_pad");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    {
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+        if (target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+    }
+#endif
+    runTensorFlowNet("keras_pad_concat");
 }

 TEST_P(Test_TensorFlow_layers, padding_same)
@@ -472,7 +481,7 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
                                   "faster_rcnn_resnet50_coco_2018_01_28"};
     checkBackend();
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
...
@@ -573,6 +582,10 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection)
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16 &&
+        INF_ENGINE_VER_MAJOR_EQ(2019020000))
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
 #endif

     checkBackend();
...
@@ -673,7 +686,8 @@ TEST_P(Test_TensorFlow_layers, lstm)
 TEST_P(Test_TensorFlow_layers, split)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+        getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
     runTensorFlowNet("split");
     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
...