Commit cfc78194, authored by Lubov Batanina and committed by Alexander Alekhin

Merge pull request #15811 from l-bat:eltwise_div

Supported ONNX Squeeze, ReduceL2 and Eltwise::DIV

* Support eltwise div

* Fix test

* OpenCL support added

* refactoring

* fix code style

* Only squeeze with axes supported
parent af233753
...@@ -62,6 +62,7 @@ public: ...@@ -62,6 +62,7 @@ public:
PROD = 0, PROD = 0,
SUM = 1, SUM = 1,
MAX = 2, MAX = 2,
DIV = 3
} op; } op;
std::vector<float> coeffs; std::vector<float> coeffs;
bool variableChannels; bool variableChannels;
...@@ -79,6 +80,8 @@ public: ...@@ -79,6 +80,8 @@ public:
op = SUM; op = SUM;
else if (operation == "max") else if (operation == "max")
op = MAX; op = MAX;
else if (operation == "div")
op = DIV;
else else
CV_Error(cv::Error::StsBadArg, "Unknown operation type \"" + operation + "\""); CV_Error(cv::Error::StsBadArg, "Unknown operation type \"" + operation + "\"");
} }
...@@ -271,6 +274,18 @@ public: ...@@ -271,6 +274,18 @@ public:
srcptr0 = (const float*)dstptr; srcptr0 = (const float*)dstptr;
} }
} }
else if( op == DIV )
{
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
for( j = 0; j < blockSize; j++ )
{
dstptr[j] = srcptr0[j]/srcptr1[j];
}
srcptr0 = (const float*)dstptr;
}
}
else if( op == MAX ) else if( op == MAX )
{ {
for( k = 1; k < n; k++ ) for( k = 1; k < n; k++ )
...@@ -393,6 +408,11 @@ public: ...@@ -393,6 +408,11 @@ public:
for (int i = 2; i < inputs.size(); ++i) for (int i = 2; i < inputs.size(); ++i)
multiply(inputs[i], outputs[0], outputs[0]); multiply(inputs[i], outputs[0], outputs[0]);
break; break;
case DIV:
divide(inputs[0], inputs[1], outputs[0]);
for (int i = 2; i < inputs.size(); ++i)
divide(outputs[0], inputs[i], outputs[0]);
break;
case MAX: case MAX:
max(inputs[0], inputs[1], outputs[0]); max(inputs[0], inputs[1], outputs[0]);
for (int i = 2; i < inputs.size(); ++i) for (int i = 2; i < inputs.size(); ++i)
...@@ -486,6 +506,8 @@ public: ...@@ -486,6 +506,8 @@ public:
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM); ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM);
else if (op == PROD) else if (op == PROD)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL); ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL);
else if (op == DIV)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::DIV);
else if (op == MAX) else if (op == MAX)
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX); ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX);
else else
......
...@@ -520,19 +520,27 @@ void ONNXImporter::populateNet(Net dstNet) ...@@ -520,19 +520,27 @@ void ONNXImporter::populateNet(Net dstNet)
} }
else if (layer_type == "Div") else if (layer_type == "Div")
{ {
Mat blob = getBlob(node_proto, constBlobs, 1); if (constBlobs.find(node_proto.input(1)) == constBlobs.end())
CV_Assert_N(blob.type() == CV_32F, blob.total());
if (blob.total() == 1)
{ {
layerParams.set("scale", 1.0f / blob.at<float>(0)); layerParams.type = "Eltwise";
layerParams.type = "Power"; layerParams.set("operation", "div");
} }
else else
{ {
layerParams.type = "Scale"; Mat blob = getBlob(node_proto, constBlobs, 1);
divide(1.0, blob, blob); CV_Assert_N(blob.type() == CV_32F, blob.total());
layerParams.blobs.push_back(blob); if (blob.total() == 1)
layerParams.set("bias_term", false); {
layerParams.set("scale", 1.0f / blob.at<float>(0));
layerParams.type = "Power";
}
else
{
layerParams.type = "Scale";
divide(1.0, blob, blob);
layerParams.blobs.push_back(blob);
layerParams.set("bias_term", false);
}
} }
} }
else if (layer_type == "Neg") else if (layer_type == "Neg")
...@@ -771,6 +779,32 @@ void ONNXImporter::populateNet(Net dstNet) ...@@ -771,6 +779,32 @@ void ONNXImporter::populateNet(Net dstNet)
continue; continue;
} }
} }
else if (layer_type == "ReduceL2")
{
CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
CV_Assert(graph_proto.node_size() > li + 1 && graph_proto.node(li + 1).op_type() == "Div");
++li;
layerParams.type = "Normalize";
DictValue axes_dict = layerParams.get("axes");
if (axes_dict.size() != 1)
CV_Error(Error::StsNotImplemented, "Multidimensional reduceL2");
int axis = axes_dict.getIntValue(0);
layerParams.set("axis",axis);
layerParams.set("end_axis", axis);
}
else if (layer_type == "Squeeze")
{
CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
DictValue axes_dict = layerParams.get("axes");
if (axes_dict.size() != 1)
CV_Error(Error::StsNotImplemented, "Multidimensional squeeze");
int axis = axes_dict.getIntValue(0);
layerParams.set("axis", axis - 1);
layerParams.set("end_axis", axis);
layerParams.type = "Flatten";
}
else if (layer_type == "Unsqueeze") else if (layer_type == "Unsqueeze")
{ {
CV_Assert(node_proto.input_size() == 1); CV_Assert(node_proto.input_size() == 1);
......
...@@ -318,6 +318,28 @@ TEST_P(Test_ONNX_layers, MultyInputs) ...@@ -318,6 +318,28 @@ TEST_P(Test_ONNX_layers, MultyInputs)
expectNoFallbacksFromIE(net); expectNoFallbacksFromIE(net);
} }
// Eltwise DIV with two runtime network inputs (divisor is not a constant blob).
TEST_P(Test_ONNX_layers, Div)
{
    const String model = _tf("models/div.onnx");
    Net net = readNetFromONNX(model);
    ASSERT_FALSE(net.empty());

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    // Load the two operands and the precomputed reference output.
    Mat numerator   = blobFromNPY(_tf("data/input_div_0.npy"));
    Mat denominator = blobFromNPY(_tf("data/input_div_1.npy"));
    Mat expected    = blobFromNPY(_tf("data/output_div.npy"));
    checkBackend(&numerator, &expected);

    net.setInput(numerator, "0");
    net.setInput(denominator, "1");
    Mat actual = net.forward();

    normAssert(expected, actual, "", default_l1, default_lInf);
    expectNoFallbacksFromIE(net);
}
TEST_P(Test_ONNX_layers, DynamicReshape) TEST_P(Test_ONNX_layers, DynamicReshape)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE) if (backend == DNN_BACKEND_INFERENCE_ENGINE)
...@@ -333,6 +355,16 @@ TEST_P(Test_ONNX_layers, Reshape) ...@@ -333,6 +355,16 @@ TEST_P(Test_ONNX_layers, Reshape)
testONNXModels("unsqueeze"); testONNXModels("unsqueeze");
} }
// ONNX Squeeze with an explicit single-axis `axes` attribute
// (imported as a Flatten layer; multi-axis squeeze is not supported).
TEST_P(Test_ONNX_layers, Squeeze)
{
testONNXModels("squeeze");
}
// ONNX ReduceL2 followed by Div, fused by the importer into a single
// Normalize layer (single reduction axis only).
TEST_P(Test_ONNX_layers, ReduceL2)
{
testONNXModels("reduceL2");
}
TEST_P(Test_ONNX_layers, Slice) TEST_P(Test_ONNX_layers, Slice)
{ {
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment