Commit dadb1473 authored by Liubov Batanina

Add BatchNorm3d layer

parent b998c06d
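
For context only (not part of the commit): a minimal sketch of how the new 3-D batch-norm path could be exercised through the public DNN API. The model file name, input shape, and the main() harness are assumptions for illustration; the tests added below exercise the bundled batch_norm_3d / batch_norm3d test models instead.

    // Hedged sketch: "batch_norm_3d.onnx" is a hypothetical model containing a
    // single BatchNormalization node applied to an NCDHW (5-D) tensor.
    #include <opencv2/core.hpp>
    #include <opencv2/dnn.hpp>

    int main()
    {
        cv::dnn::Net net = cv::dnn::readNetFromONNX("batch_norm_3d.onnx");

        // Matching the supportBackend() change below: 5-D inputs are routed to
        // the Inference Engine backend on a CPU target.
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

        // A 5-D NCDHW blob: batch=1, channels=3, depth=4, height=5, width=6.
        int sz[] = {1, 3, 4, 5, 6};
        cv::Mat input(5, sz, CV_32F, cv::Scalar(0));

        net.setInput(input);
        cv::Mat out = net.forward();
        return 0;
    }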
@@ -29,6 +29,8 @@ class BatchNormLayerImpl CV_FINAL : public BatchNormLayer
 public:
     Mat weights_, bias_;
     UMat umat_weight, umat_bias;
+    mutable int dims;

     BatchNormLayerImpl(const LayerParams& params)
     {
@@ -142,6 +144,7 @@ public:
                          std::vector<MatShape> &outputs,
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
+        dims = inputs[0].size();
         if (!useGlobalStats && inputs[0][0] != 1)
             CV_Error(Error::StsNotImplemented, "Batch normalization in training mode with batch size > 1");
         Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
@@ -150,9 +153,9 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        return backendId == DNN_BACKEND_OPENCV ||
+        return (backendId == DNN_BACKEND_OPENCV && (dims == 4 || dims == 2)) ||
                (backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
+               (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
     }

 #ifdef HAVE_OPENCL
...
@@ -167,6 +167,13 @@ TEST_P(Test_ONNX_layers, BatchNormalization)
     testONNXModels("batch_norm");
 }

+TEST_P(Test_ONNX_layers, BatchNormalization3D)
+{
+    if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
+        throw SkipTestException("Only DLIE backend on CPU is supported");
+    testONNXModels("batch_norm_3d");
+}
+
 TEST_P(Test_ONNX_layers, Transpose)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
...
@@ -188,6 +188,13 @@ TEST_P(Test_TensorFlow_layers, batch_norm)
     runTensorFlowNet("mvn_batch_norm_1x1");
 }

+TEST_P(Test_TensorFlow_layers, batch_norm3D)
+{
+    if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
+        throw SkipTestException("Only DLIE backend on CPU is supported");
+    runTensorFlowNet("batch_norm3d");
+}
+
 TEST_P(Test_TensorFlow_layers, slim_batch_norm)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
...