Commit aa37863b authored by Adam Rogowiec, committed by Scott Cyphers

[ONNX] Batchnorm operation (#1396)

* onnx: add 'constant' operator
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* onnx: getting attribute value by name
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* onnx: fix code style
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* onnx: fix clang compilation warnings
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* onnx: exception
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* onnx: add 'split' operator
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* onnx: add public interface
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* onnx: add initial unit test for importer
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* onnx: initial implementation of operator set
Signed-off-by: Artur Wojcik <artur.wojcik@intel.com>

* [WIP] Unit test for split operation.

* Fix Split Op bounds calculation + UT

* clang format

* Split Op with variable parts unit test.

* Remove unused headers

* General purpose exceptions.

* Change not_supported_error message template.

* Add new general purpose errors.

* ONNX BatchNormalization operation.

* Clang-format

* Update CMake

* Add fixed test data.

* Add missing ngraph install prefix for cmake in travis Dockerfile.

* Remove -Wno-zero-as-null-pointer-constant

* Code review

* Apply clang-format-3.9

* Add missing onnx_import interface files to CMakeList

* Clean code.

* Fix test.

* Apply clang-format-3.9

* Copyright notice format

* Remove inputs in separate files

* use all_close to compare floating point values

* missed changing one CPU to INTERPRETER for unit test
parent 5e307d6b
@@ -35,10 +35,12 @@ add_library(onnx_import_interface OBJECT
 add_library(onnx_import STATIC
     onnx.pb.cc
     attribute.cpp
+    exceptions.hpp
     graph.cpp
     model.hpp
     node.cpp
     op/add.hpp
+    op/batch_norm.hpp
     op/constant.hpp
     op/split.hpp
     ops_bridge.cpp

New file: ngraph/frontend/onnx_import/exceptions.hpp
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once

#include <string>

#include "ngraph/except.hpp"

namespace ngraph
{
    namespace onnx_import
    {
        namespace error
        {
            struct not_supported_error : ngraph_error
            {
                explicit not_supported_error(const std::string& op_name,
                                             const std::string& name,
                                             const std::string& message)
                    : ngraph_error{op_name + " node (" + name + "): " + message}
                {
                }
            };

            namespace op
            {
                struct op_value_error : ngraph_error
                {
                    explicit op_value_error(const std::string& op_name,
                                            const std::string& name,
                                            const std::string& message)
                        : ngraph_error{op_name + " node (" + name + "): " + message}
                    {
                    }
                };

            } // namespace op
        }     // namespace error
    }         // namespace onnx_import
} // namespace ngraph
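Both error types prepend the operator and node name to the message, so a throw site only has to supply the reason. An illustrative throw site (not taken from this commit):

```cpp
// Illustrative only: the resulting what() message reads
// "Split node (my_split): unequal split parts are not supported."
throw ngraph::onnx_import::error::not_supported_error(
    "Split", "my_split", "unequal split parts are not supported.");
```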
New file: ngraph/frontend/onnx_import/op/batch_norm.hpp

/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once

#include <memory>

#include "ngraph/frontend/onnx_import/exceptions.hpp"
#include "ngraph/frontend/onnx_import/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/batch_norm.hpp"

namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            inline NodeVector batch_norm(const Node& node, const NodeVector& inputs)
            {
                auto x = inputs.at(0);
                auto scale = inputs.at(1);
                auto bias = inputs.at(2);
                std::shared_ptr<ngraph::Node> mean{nullptr};
                std::shared_ptr<ngraph::Node> var{nullptr};

                int is_test{node.get_attribute_value<int>("is_test", 1)};
                int spatial{node.get_attribute_value<int>("spatial", 1)};
                double epsilon{node.get_attribute_value<double>("epsilon", 1e-5)};
                // TODO: Implement learning mode support
                // float momentum{node.get_attribute_value<float>("momentum", 0.9f)};
                bool training = false;

                if (!is_test)
                {
                    throw error::not_supported_error(
                        "BatchNormalization",
                        node.get_name(),
                        "only 'is_test' mode is currently supported.");
                }
                if (!spatial)
                {
                    throw error::not_supported_error(
                        "BatchNormalization",
                        node.get_name(),
                        "only 'spatial' mode is currently supported.");
                }

                // With five inputs the optional running mean and variance are provided.
                if (inputs.size() >= 5)
                {
                    mean = inputs.at(3);
                    var = inputs.at(4);
                    return {std::make_shared<ngraph::op::BatchNorm>(
                        epsilon, scale, bias, x, mean, var, training)};
                }
                return {std::make_shared<ngraph::op::BatchNorm>(epsilon, scale, bias, x)};
            }

        } // namespace op
    }     // namespace onnx_import
} // namespace ngraph
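For reference, in inference mode the constructed ngraph::op::BatchNorm computes the standard per-channel batch-normalization transform, with scale = γ, bias = β, and the provided running mean μ and variance σ²:

```latex
y = \gamma \cdot \frac{x - \mu}{\sqrt{\sigma^{2} + \epsilon}} + \beta
```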
ops_bridge.cpp

@@ -19,6 +19,7 @@
 #include "attribute.hpp"
 #include "ngraph/frontend/onnx_import/op/add.hpp"
+#include "ngraph/frontend/onnx_import/op/batch_norm.hpp"
 #include "ngraph/frontend/onnx_import/op/constant.hpp"
 #include "ngraph/frontend/onnx_import/op/split.hpp"
 #include "ops_bridge.hpp"
@@ -42,6 +43,11 @@ namespace ngraph
 } // namespace error

 NodeVector add(const Node& node) { return op::add(node); }

+NodeVector batch_norm(const Node& node)
+{
+    return op::batch_norm(node, node.get_ng_inputs());
+}
+
 NodeVector constant(const Node& node)
 {
     return {op::constant(node.get_attribute_value<Tensor>("value"))};
@@ -77,6 +83,8 @@ namespace ngraph
 ops_bridge()
 {
     m_map.emplace("Add", std::bind(add, std::placeholders::_1));
+    m_map.emplace("BatchNormalization",
+                  std::bind(batch_norm, std::placeholders::_1));
     m_map.emplace("Constant", std::bind(constant, std::placeholders::_1));
     m_map.emplace("Split", std::bind(split, std::placeholders::_1));
 }
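For context, registering an operator is all the ops_bridge needs: handlers are looked up by ONNX operator name and invoked with the node. A minimal, self-contained sketch of that dispatch pattern (the Node and NodeVector stand-ins below are not the real types, and the actual ops_bridge implementation is not shown in this commit):

```cpp
#include <functional>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

// Stand-ins so the sketch compiles on its own; not the real onnx_import types.
struct Node
{
    std::string op_type;
};
using NodeVector = std::vector<int>;
using Handler = std::function<NodeVector(const Node&)>;

// Look up the handler registered for this ONNX operator name and invoke it;
// an unregistered name is an error.
NodeVector dispatch(const std::map<std::string, Handler>& ops, const Node& node)
{
    auto it = ops.find(node.op_type);
    if (it == ops.end())
    {
        throw std::runtime_error{"unknown operation: " + node.op_type};
    }
    return it->second(node);
}
```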
New file (binary): ONNX test model onnx/batchnorm_default.onnx
(backend-test graph "test_batchnorm_example": a single BatchNormalization node
with inputs x, s, bias, mean, var and output y; serialized protobuf content
omitted)
@@ -20,8 +20,12 @@
 #include "gtest/gtest.h"
 #include "ngraph/frontend/onnx_import/onnx.hpp"
 #include "ngraph/ngraph.hpp"
+#include "util/all_close_f.hpp"
+#include "util/ndarray.hpp"
 #include "util/test_tools.hpp"
+
+using namespace ngraph;

 TEST(onnx, model_add_abc)
 {
     auto model{ngraph::onnx_import::load_onnx_model(
@@ -39,7 +43,7 @@ TEST(onnx, model_add_abc)
     auto r{backend->create_tensor(ngraph::element::f32, shape)};
     backend->call(model.front(), {r}, {a, b, c});
-    EXPECT_EQ((std::vector<float>{6}), read_vector<float>(r));
+    EXPECT_TRUE(test::all_close_f((std::vector<float>{6}), read_vector<float>(r)));
 }
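This and the following EXPECT_TRUE(test::all_close_f(...)) replacements make the float comparisons tolerance-based rather than exact, avoiding spurious failures from rounding. A simplified stand-in for what such a helper checks (ngraph's real all_close_f compares floats by matching floating-point bits; the epsilon form here is only an illustration of the idea):

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Element-wise approximate equality with absolute and relative tolerances.
// Simplified illustration only; not ngraph's actual test::all_close_f.
bool approx_all_close(const std::vector<float>& a,
                      const std::vector<float>& b,
                      float atol = 1e-6f,
                      float rtol = 1e-5f)
{
    if (a.size() != b.size())
    {
        return false;
    }
    for (std::size_t i = 0; i < a.size(); ++i)
    {
        if (std::fabs(a[i] - b[i]) > atol + rtol * std::fabs(b[i]))
        {
            return false;
        }
    }
    return true;
}
```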
TEST(onnx, model_add_abc_initializers)
@@ -56,7 +60,7 @@ TEST(onnx, model_add_abc_initializers)
     auto r{backend->create_tensor(ngraph::element::f32, shape)};
     backend->call(model.front(), {r}, {c});
-    EXPECT_EQ((std::vector<float>{3, 6, 9, 12}), read_vector<float>(r));
+    EXPECT_TRUE(test::all_close_f((std::vector<float>{3, 6, 9, 12}), read_vector<float>(r)));
 }
TEST(onnx, model_split_equal_parts_default)
@@ -71,7 +75,7 @@ TEST(onnx, model_split_equal_parts_default)
     {
         auto result_vectors = execute(model[i], args, "INTERPRETER");
         EXPECT_EQ(result_vectors.size(), 1);
-        EXPECT_EQ(expected_output[i], result_vectors.front());
+        EXPECT_TRUE(test::all_close_f(expected_output[i], result_vectors.front()));
     }
 }
@@ -90,7 +94,7 @@ TEST(onnx, model_split_equal_parts_2d)
     {
         auto result_vectors = execute(model[i], args, "INTERPRETER");
         EXPECT_EQ(result_vectors.size(), 1);
-        EXPECT_EQ(expected_output[i], result_vectors[0]);
+        EXPECT_TRUE(test::all_close_f(expected_output[i], result_vectors[0]));
     }
 }
@@ -109,6 +113,36 @@ TEST(onnx, model_split_variable_parts_2d)
     {
         auto result_vectors = execute(model[i], args, "INTERPRETER");
         EXPECT_EQ(result_vectors.size(), 1);
-        EXPECT_EQ(expected_output[i], result_vectors[0]);
+        EXPECT_TRUE(test::all_close_f(expected_output[i], result_vectors[0]));
     }
 }
TEST(onnx, model_batchnorm_default)
{
    // Batch Normalization with default attributes
    auto function{ngraph::onnx_import::import_onnx_function(
        ngraph::file_util::path_join(SERIALIZED_ZOO, "onnx/batchnorm_default.onnx"))};

    std::vector<std::vector<float>> inputs;
    // input data, shape (1, 2, 1, 3): batch of 1, 2 channels, 1x3 spatial
    inputs.emplace_back(
        ngraph::test::NDArray<float, 4>({{{{-1., 0., 1.}}, {{2., 3., 4.}}}}).get_vector());
    // scale (gamma), shape (2): one value per channel
    inputs.emplace_back(std::vector<float>{1., 1.5});
    // bias (beta), shape (2)
    inputs.emplace_back(std::vector<float>{0., 1.});
    // mean, shape (2)
    inputs.emplace_back(std::vector<float>{0., 3.});
    // var, shape (2)
    inputs.emplace_back(std::vector<float>{1., 1.5});

    // expected output, shape (1, 2, 1, 3)
    auto expected_output = ngraph::test::NDArray<float, 4>({{{{-0.999995f, 0.f, 0.999995f}},
                                                             {{-0.22474074f, 1.f, 2.2247407f}}}})
                               .get_vector();

    auto result_vectors = execute(function, inputs, "INTERPRETER");
    EXPECT_TRUE(test::all_close_f(expected_output, result_vectors.front()));
}
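As a quick sanity check, the expected values follow from the inference formula given earlier. For the second channel (γ = 1.5, β = 1, μ = 3, σ² = 1.5) at x = 2:

```latex
y = 1.5 \cdot \frac{2 - 3}{\sqrt{1.5 + 10^{-5}}} + 1 \approx -0.2247407
```

and for the first channel (γ = 1, β = 0, μ = 0, σ² = 1) at x = -1, y = -1 / √(1 + 10⁻⁵) ≈ -0.999995, matching the expected output.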