Commit d68466bb authored by Alexander Alekhin

Merge pull request #10940 from dkurt:dnn_tf_graph_optim

parents ab110c0a ab20d2a3
@@ -32,7 +32,7 @@ public:
     BatchNormLayerImpl(const LayerParams& params)
     {
         setParamsFrom(params);
-        CV_Assert(blobs.size() >= 3);
+        CV_Assert(blobs.size() >= 2);
         hasWeights = params.get<bool>("has_weight", false);
         hasBias = params.get<bool>("has_bias", false);
@@ -46,8 +46,8 @@ public:
                   blobs[0].type() == CV_32F && blobs[1].type() == CV_32F);

         float varMeanScale = 1.f;
-        if (!hasWeights && !hasBias) {
-            CV_Assert(blobs[2].type() == CV_32F);
+        if (!hasWeights && !hasBias && blobs.size() > 2) {
+            CV_Assert(blobs.size() == 3, blobs[2].type() == CV_32F);
             varMeanScale = blobs[2].at<float>(0);
             if (varMeanScale != 0)
                 varMeanScale = 1/varMeanScale;
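Note on the change above: the relaxed assertions let a batch normalization layer carry only a mean and a variance blob, reading the optional scaling factor only when a third blob is actually present (the new unfused_batch_norm tests further down exercise this case). The following is a minimal sketch, not the actual OpenCV kernel, of how the blobs are typically folded into a per-channel scale and shift once varMeanScale has been resolved as above; the struct and epsilon value are illustrative assumptions.

#include <cmath>
#include <vector>

// Illustrative container mirroring the blobs discussed above (not OpenCV API).
struct BatchNormBlobs
{
    std::vector<float> mean, var;     // blobs[0], blobs[1]
    std::vector<float> weight, bias;  // optional, when has_weight / has_bias are set
    float varMeanScale = 1.f;         // derived from blobs[2] when it exists
    float eps = 1e-5f;                // assumed epsilon, for the sketch only
};

// y = (x - mean*s) * w / sqrt(var*s + eps) + b, with s = varMeanScale
inline float applyBatchNorm(const BatchNormBlobs& p, int channel, float x)
{
    const float w = p.weight.empty() ? 1.f : p.weight[channel];
    const float b = p.bias.empty()   ? 0.f : p.bias[channel];
    const float scale = w / std::sqrt(p.var[channel] * p.varMeanScale + p.eps);
    return (x - p.mean[channel] * p.varMeanScale) * scale + b;
}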
This diff is collapsed.
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_TF_SIMPLIFIER_HPP__
#define __OPENCV_DNN_TF_SIMPLIFIER_HPP__

#include "../precomp.hpp"

#ifdef HAVE_PROTOBUF

#include "tf_io.hpp"

namespace cv { namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN

void RemoveIdentityOps(tensorflow::GraphDef& net);

void simplifySubgraphs(tensorflow::GraphDef& net);

Mat getTensorContent(const tensorflow::TensorProto &tensor);

CV__DNN_EXPERIMENTAL_NS_END
}} // namespace dnn, namespace cv

#endif // HAVE_PROTOBUF
#endif // __OPENCV_DNN_TF_SIMPLIFIER_HPP__
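The header above only declares the graph simplification entry points. As a rough orientation, here is a hedged sketch of how an importer could call them on a parsed tensorflow::GraphDef; the wrapper function is hypothetical, and the includes for this header and for the protobuf-generated GraphDef type are assumed to be in place, as in the header itself.

#ifdef HAVE_PROTOBUF
// Hypothetical driver; the real call sites live in the TensorFlow importer.
static void prepareTFGraph(tensorflow::GraphDef& net)
{
    using namespace cv::dnn;

    // Remove pass-through Identity nodes so that later pattern matching
    // sees direct producer/consumer edges.
    RemoveIdentityOps(net);

    // Collapse known multi-node subgraphs (for instance the unfused batch
    // norm and unfused flatten patterns exercised by the new tests) into
    // single nodes the importer already understands.
    simplifySubgraphs(net);

    // For constant nodes, getTensorContent() can then convert a
    // tensorflow::TensorProto attribute into a cv::Mat layer blob.
}
#endif // HAVE_PROTOBUF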
This diff is collapsed.
@@ -150,6 +150,9 @@ TEST_P(Test_TensorFlow_layers, batch_norm)
     runTensorFlowNet("batch_norm_text", targetId, true);
     runTensorFlowNet("mvn_batch_norm", targetId);
     runTensorFlowNet("mvn_batch_norm_1x1", targetId);
+    runTensorFlowNet("unfused_batch_norm", targetId);
+    runTensorFlowNet("fused_batch_norm_no_gamma", targetId);
+    runTensorFlowNet("unfused_batch_norm_no_gamma", targetId);
 }
 
 TEST_P(Test_TensorFlow_layers, pooling)
@@ -159,6 +162,7 @@ TEST_P(Test_TensorFlow_layers, pooling)
     runTensorFlowNet("max_pool_odd_valid", targetId);
     runTensorFlowNet("ave_pool_same", targetId);
     runTensorFlowNet("max_pool_odd_same", targetId);
+    runTensorFlowNet("reduce_mean", targetId); // an average pooling over all spatial dimensions.
 }
 
 TEST_P(Test_TensorFlow_layers, deconvolution)
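The reduce_mean case added in the pooling hunk above is annotated in the test as an average pooling over all spatial dimensions. The sketch below illustrates that equivalence for an NCHW, CV_32F blob: averaging a single full HxW window per channel produces the same values a mean reduction over the spatial axes would. The helper name is ad hoc, not an OpenCV API.

#include <opencv2/core.hpp>

// Global average pooling over the spatial axes of an NCHW float blob.
static cv::Mat globalAveragePool(const cv::Mat& blob)
{
    CV_Assert(blob.dims == 4 && blob.type() == CV_32F);
    const int N = blob.size[0], C = blob.size[1], H = blob.size[2], W = blob.size[3];

    cv::Mat out(std::vector<int>{N, C, 1, 1}, CV_32F);
    for (int n = 0; n < N; ++n)
        for (int c = 0; c < C; ++c)
        {
            const float* src = blob.ptr<float>(n, c);  // start of the HxW plane
            double sum = 0.0;
            for (int i = 0; i < H * W; ++i)
                sum += src[i];
            out.ptr<float>(n, c)[0] = static_cast<float>(sum / (H * W));
        }
    return out;
}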
@@ -185,6 +189,8 @@ TEST_P(Test_TensorFlow_layers, reshape)
     runTensorFlowNet("shift_reshape_no_reorder", targetId);
     runTensorFlowNet("reshape_reduce", targetId);
     runTensorFlowNet("flatten", targetId, true);
+    runTensorFlowNet("unfused_flatten", targetId);
+    runTensorFlowNet("unfused_flatten_unknown_batch", targetId);
 }
 
 INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_layers, availableDnnTargets());
@@ -332,6 +338,21 @@ TEST(Test_TensorFlow, slice)
     runTensorFlowNet("slice_4d");
 }
 
+TEST(Test_TensorFlow, softmax)
+{
+    runTensorFlowNet("keras_softmax");
+}
+
+TEST(Test_TensorFlow, relu6)
+{
+    runTensorFlowNet("keras_relu6");
+}
+
+TEST(Test_TensorFlow, keras_mobilenet_head)
+{
+    runTensorFlowNet("keras_mobilenet_head");
+}
+
 TEST(Test_TensorFlow, memory_read)
 {
     double l1 = 1e-5;
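For orientation, each runTensorFlowNet() call above imports a frozen graph named after its first argument and checks the forward pass. The snippet below is a hedged, stand-alone approximation of that flow using the public dnn API; the file name pattern, the input shape, and the absence of a reference comparison are simplifications, since the real helper in test_tf_importer.cpp also handles optional .pbtxt text graphs and accuracy tolerances.

#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <string>

// Roughly what one test case exercises: import a frozen TensorFlow graph,
// run it on the requested target, and return the output blob.
static cv::Mat forwardTFModel(const std::string& prefix, int targetId)
{
    cv::dnn::Net net = cv::dnn::readNetFromTensorflow("dnn/tensorflow/" + prefix + "_net.pb");
    net.setPreferableTarget(targetId);

    // Arbitrary input for the sketch; the real tests feed stored blobs
    // and compare the result against a stored reference output.
    cv::Mat img(224, 224, CV_8UC3);
    cv::randu(img, 0, 255);
    net.setInput(cv::dnn::blobFromImage(img, 1.0 / 255));

    return net.forward();
}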