Commit fffbaa89 authored by Adam Rogowiec's avatar Adam Rogowiec Committed by Scott Cyphers

[Fused op] Normalize (#2888)

* Extend lp-norm functions to take bias.

* Move lp-norm utilities to nGraph core op/util.

* Move norm files to builder directory.

* Normalize fused operator implementation.

* Fused op boilerplate.

* Fix node validation and normalization across spatial axes.

* Add UT normalize across CHW with scalar scale.

* Fix expanding input tensor to 4D.

* Add more UT for 3D and 2D.

* Add more UT, with scale and across HW.

* Update to new location of the l2_norm function.

* Add type_prop UT and update gpu/igpu manifests.

* Apply clang-format.

* Add positive UT for type_prop.

* Update unit test manifests.

* Address review comments.

* Add using namespace std.

* Remove unnecessary std prefixes.

* Remove blacklisted unittests for GPU.

* Apply clang-format.

* Review comments.

* Fix clang errors.
parent 58dc9d09
......@@ -292,6 +292,8 @@ set (SRC
op/fused/group_conv.cpp
op/fused/mvn.cpp
op/fused/mvn.hpp
op/fused/normalize.cpp
op/fused/normalize.hpp
op/fused/prelu.cpp
op/fused/prelu.hpp
op/fused/space_to_depth.cpp
......
......@@ -103,6 +103,7 @@
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/mvn.hpp"
#include "ngraph/op/fused/normalize.hpp"
#include "ngraph/op/fused/prelu.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
#include "ngraph/op/gather.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <iterator>
#include "ngraph/builder/norm.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/util/broadcasting.hpp"
#include "ngraph/op/util/reshape.hpp"
#include "normalize.hpp"
using namespace std;
using namespace ngraph;
op::Normalize::Normalize(const shared_ptr<ngraph::Node>& data,
const shared_ptr<ngraph::Node>& scale,
bool across_spatial,
bool channel_shared,
float eps)
: FusedOp("Normalize", {data, scale})
, m_across_spatial{across_spatial}
, m_channel_shared{channel_shared}
, m_eps{eps}
{
constructor_validate_and_infer_types();
}
void op::Normalize::pre_validate_and_infer_types()
{
const auto& data_pshape = get_input_partial_shape(0);
const auto& scale_pshape = get_input_partial_shape(1);
if (data_pshape.is_static() && scale_pshape.is_static())
{
const auto& data_shape{data_pshape.to_shape()};
const auto& scale_shape{scale_pshape.to_shape()};
// Input data must be a 2D, 3D or 4D tensor.
NODE_VALIDATION_CHECK(this,
(data_shape.size() >= 2 && data_shape.size() <= 4),
"Input tensor rank must be 2, 3 or 4 dimensional (actual input "
"shape: ",
data_shape,
").");
if (m_channel_shared)
{
NODE_VALIDATION_CHECK(this,
scale_shape.size() == 0,
"Scale must be a scalar if 'channels_shared' parameter is true");
}
else
{
// Rank 2: the input has only H and W axes.
if (data_shape.size() == 2)
{
NODE_VALIDATION_CHECK(this,
scale_shape.size() == 0,
"Scale must be a scalar if input tensor is of rank 2.");
}
else
{
size_t n_channels = data_shape.size() == 3 ? data_shape.at(0) : data_shape.at(1);
NODE_VALIDATION_CHECK(
this,
(scale_shape.size() == 1 && scale_shape.at(0) == n_channels),
"Scale must be a vector of size of input tensor channels if input tensor is "
"of rank greater equal 3.");
}
}
}
}
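// For quick reference, the data/scale shape combinations accepted by the
// checks above (C denotes the channel axis):
//
//   data shape     channel_shared   accepted scale shape
//   {H, W}         true or false    {}  (scalar)
//   {C, H, W}      true             {}
//   {C, H, W}      false            {C}
//   {N, C, H, W}   true             {}
//   {N, C, H, W}   false            {C}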
NodeVector op::Normalize::decompose_op() const
{
const auto input_node{get_argument(0)};
const auto& input_shape{input_node->get_shape()};
auto data{input_node};
// Reshape to 4D tensor.
if (input_shape.size() != 4)
{
Shape data_shape(4 - input_shape.size(), 1);
copy(begin(input_shape), end(input_shape), back_inserter(data_shape));
data = util::reshape(data, data_shape);
}
// Calculate norm over CHW axes.
AxisSet reduction_axes{1, 2, 3};
if (m_across_spatial)
{
// Calculate norm only over HW axes.
reduction_axes = AxisSet{2, 3};
}
// Calculate the L2 norm over the selected reduction axes.
shared_ptr<Node> norm = builder::l2_norm(data, reduction_axes, m_eps);
norm = make_broadcast_node(norm, data->get_shape(), 0);
auto scale_node{get_argument(1)};
// Broadcast scale to data tensor shape.
if (m_channel_shared)
{
// Scale is a scalar.
scale_node = make_broadcast_node(scale_node, data->get_shape());
}
else
{
// Scale is a vector of size equal to C axis.
scale_node = make_broadcast_node(scale_node, data->get_shape(), 1);
}
data = data / norm * scale_node;
// Restore the original input tensor rank.
if (input_shape.size() != 4)
{
data = util::reshape(data, input_shape);
}
return {data};
}
shared_ptr<Node> op::Normalize::copy_with_new_args(const NodeVector& new_args) const
{
if (new_args.size() != 2)
{
throw ngraph_error("Incorrect number of new arguments");
}
return make_shared<Normalize>(
new_args.at(0), new_args.at(1), m_across_spatial, m_channel_shared, m_eps);
}
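// For reference, a minimal scalar sketch of the math decompose_op emits for a
// single {C, H, W} volume, assuming builder::l2_norm computes
// sqrt(sum(x^2) + eps) over the reduction axes (an assumption consistent with
// the expected values in the backend tests below). Illustrative only; not
// part of this commit's API.
#include <cmath>
#include <cstddef>
#include <vector>
static std::vector<float> normalize_reference(const std::vector<float>& data,
                                              size_t c,
                                              size_t h,
                                              size_t w,
                                              const std::vector<float>& scale,
                                              bool across_spatial,
                                              bool channel_shared,
                                              float eps)
{
    // data holds c * h * w elements in row-major order.
    const size_t plane = h * w;
    // Biased L2 norm over a contiguous range of elements.
    auto l2 = [&](size_t first, size_t last) {
        double sum = 0.0;
        for (size_t i = first; i < last; ++i)
        {
            sum += double(data[i]) * data[i];
        }
        return float(std::sqrt(sum + eps));
    };
    // across_spatial == false: one norm over the whole CHW volume;
    // across_spatial == true:  a separate norm per channel over its HW plane.
    const float volume_norm = l2(0, c * plane);
    std::vector<float> out(data.size());
    for (size_t ch = 0; ch < c; ++ch)
    {
        const float norm = across_spatial ? l2(ch * plane, (ch + 1) * plane) : volume_norm;
        const float s = channel_shared ? scale.at(0) : scale.at(ch);
        for (size_t i = ch * plane; i < (ch + 1) * plane; ++i)
        {
            out[i] = data[i] / norm * s;
        }
    }
    return out;
}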
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "ngraph/node.hpp"
#include "ngraph/op/util/fused_op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Normalizes the input tensor by its L2 norm.
///
class Normalize : public ngraph::op::util::FusedOp
{
public:
///
/// \brief Constructs a Normalize operation.
///
/// \param data - Node producing the input tensor
/// \param scale - Node producing the scale tensor
/// \param across_spatial - Whether to calculate the norm across all channels.
/// \param channel_shared - Whether the scale is shared across channels.
/// \param eps - The epsilon added to the L2 norm.
///
Normalize(const std::shared_ptr<ngraph::Node>& data,
const std::shared_ptr<ngraph::Node>& scale,
bool across_spatial,
bool channel_shared,
float eps);
bool get_across_spatial() const { return m_across_spatial; }
bool get_channel_shared() const { return m_channel_shared; }
float get_eps() const { return m_eps; }
virtual NodeVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
bool m_across_spatial{false};
bool m_channel_shared{false};
float m_eps{1.f};
};
}
}
......@@ -27,5 +27,6 @@ NGRAPH_OP(Elu, ngraph::op)
NGRAPH_OP(Gemm, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op)
NGRAPH_OP(MVN, ngraph::op)
NGRAPH_OP(Normalize, ngraph::op)
NGRAPH_OP(PRelu, ngraph::op)
NGRAPH_OP(SpaceToDepth, ngraph::op)
......@@ -84,6 +84,7 @@
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/mvn.hpp"
#include "ngraph/op/fused/normalize.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/greater.hpp"
......@@ -1993,6 +1994,7 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::GenerateMask:
case OP_TYPEID::HardSigmoid:
case OP_TYPEID::MVN:
case OP_TYPEID::Normalize:
case OP_TYPEID::PRelu:
case OP_TYPEID::Passthrough:
case OP_TYPEID::QuantizedAvgPool:
......
......@@ -64,6 +64,14 @@ gather_nd_batch_2d_from_3d
gather_scalar_indices_no_axis
gather_scalar_indices
gather_nd_single_indices
normalize_across_chw_scalar_scale_4d
normalize_across_chw_scalar_scale_3d
normalize_across_chw_scalar_scale_2d
normalize_across_chw_w_scale
normalize_across_hw_w_scale
normalize_invalid_input_tensor_rank
normalize_invalid_scale_rank
normalize
gemm
gemm_broadcast_input_C
hardsigmoid
......
......@@ -74,6 +74,7 @@
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/mvn.hpp"
#include "ngraph/op/fused/normalize.hpp"
#include "ngraph/op/fused/prelu.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
#include "ngraph/op/gather.hpp"
......@@ -1145,6 +1146,15 @@ static shared_ptr<ngraph::Function>
node = make_shared<op::Negative>(args[0]);
break;
}
case OP_TYPEID::Normalize:
{
bool across_spatial = node_js.at("across_spatial").get<bool>();
bool channel_shared = node_js.at("channel_shared").get<bool>();
float eps = node_js.at("eps").get<float>();
node = make_shared<op::Normalize>(
args[0], args[1], across_spatial, channel_shared, eps);
break;
}
case OP_TYPEID::NotEqual:
{
node = make_shared<op::NotEqual>(args[0], args[1]);
......@@ -1953,6 +1963,14 @@ static json write(const Node& n, bool binary_constant_data)
}
case OP_TYPEID::Negative: { break;
}
case OP_TYPEID::Normalize:
{
auto tmp = dynamic_cast<const op::Normalize*>(&n);
node["across_spatial"] = tmp->get_across_spatial();
node["channel_shared"] = tmp->get_channel_shared();
node["eps"] = tmp->get_eps();
break;
}
case OP_TYPEID::NotEqual: { break;
}
case OP_TYPEID::Not: { break;
......
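The serialized form of a Normalize node therefore carries exactly three op-specific attributes. A sketch of the resulting JSON fragment, assuming the serializer's usual per-node fields (the "name" and "inputs" values here are hypothetical; only the three attributes are confirmed by the code above):

{
  "name": "Normalize_0",
  "op": "Normalize",
  "inputs": ["data", "scale"],
  "across_spatial": false,
  "channel_shared": true,
  "eps": 1e-06
}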
......@@ -381,6 +381,159 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space)
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_scalar_scale_4d)
{
Shape data_shape{1, 2, 3, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto scale = make_shared<op::Parameter>(element::f32, Shape{});
bool across_spatial{false};
bool channel_shared{true};
float eps{1e-6f};
auto normalize = make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data, scale});
auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
vector<float> input_data(shape_size(data_shape));
iota(begin(input_data), end(input_data), 1);
test_case.add_input<float>(input_data);
test_case.add_input<float>({2.f});
test_case.add_expected_output<float>(
data_shape, {0.02857143f, 0.05714286f, 0.08571429f, 0.11428571f, 0.14285714f, 0.17142857f,
0.2f, 0.22857143f, 0.25714286f, 0.28571429f, 0.31428571f, 0.34285714f,
0.37142857f, 0.4f, 0.42857143f, 0.45714286f, 0.48571429f, 0.51428571f,
0.54285714f, 0.57142857f, 0.6f, 0.62857143f, 0.65714286f, 0.68571429f});
test_case.run();
}
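// Where the expected values above come from: the inputs are 1..24, the norm
// is taken over the whole CHW volume (across_spatial == false), and
// eps = 1e-6 is negligible here:
//   sum(i^2 for i = 1..24) = 24 * 25 * 49 / 6 = 4900,  sqrt(4900) = 70,
// so each output element is i * scale / 70 = i * 2 / 70, e.g.
// 1 * 2 / 70 = 0.02857143 and 24 * 2 / 70 = 0.68571429.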
NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_scalar_scale_3d)
{
Shape data_shape{2, 3, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto scale = make_shared<op::Parameter>(element::f32, Shape{});
bool across_spatial{false};
bool channel_shared{true};
float eps{1e-6f};
auto normalize = make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data, scale});
auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
vector<float> input_data(shape_size(data_shape));
iota(begin(input_data), end(input_data), 1);
test_case.add_input<float>(input_data);
test_case.add_input<float>({2.f});
test_case.add_expected_output<float>(
data_shape, {0.02857143f, 0.05714286f, 0.08571429f, 0.11428571f, 0.14285714f, 0.17142857f,
0.2f, 0.22857143f, 0.25714286f, 0.28571429f, 0.31428571f, 0.34285714f,
0.37142857f, 0.4f, 0.42857143f, 0.45714286f, 0.48571429f, 0.51428571f,
0.54285714f, 0.57142857f, 0.6f, 0.62857143f, 0.65714286f, 0.68571429f});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_scalar_scale_2d)
{
Shape data_shape{3, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto scale = make_shared<op::Parameter>(element::f32, Shape{});
bool across_spatial{false};
bool channel_shared{true};
float eps{1e-6f};
auto normalize = make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data, scale});
auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
vector<float> input_data(shape_size(data_shape));
iota(begin(input_data), end(input_data), 1);
test_case.add_input<float>(input_data);
test_case.add_input<float>({2.f});
test_case.add_expected_output<float>(data_shape,
{0.07844645,
0.15689291,
0.23533936,
0.31378582,
0.39223227,
0.47067872,
0.54912518,
0.62757163,
0.70601809,
0.78446454,
0.86291099,
0.94135745});
test_case.run();
}
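// Here the 2D input is reshaped to {1, 1, 3, 4}, so the norm runs over all
// 12 elements: sum(i^2 for i = 1..12) = 650, and each output element is
// i * 2 / sqrt(650) ~= i * 0.07844645.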
NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_w_scale)
{
Shape data_shape{1, 2, 3, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto scale = make_shared<op::Parameter>(element::f32, Shape{2});
bool across_spatial{false};
bool channel_shared{false};
float eps{1e-6f};
auto normalize = make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data, scale});
auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
vector<float> input_data(shape_size(data_shape));
iota(begin(input_data), end(input_data), 1);
test_case.add_input<float>(input_data);
test_case.add_input<float>({2.f, 3.f});
test_case.add_expected_output<float>(
data_shape, {0.02857143, 0.05714286, 0.08571429, 0.11428571, 0.14285714, 0.17142857,
0.2, 0.22857143, 0.25714286, 0.28571429, 0.31428571, 0.34285714,
0.55714286, 0.6, 0.64285714, 0.68571429, 0.72857143, 0.77142857,
0.81428571, 0.85714286, 0.9, 0.94285714, 0.98571429, 1.02857143});
test_case.run();
}
// TODO: lower tolerance; mismatch at the 4th decimal position.
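// Note: the expected values below appear to be computed without the eps bias
// (first channel: 2 / sqrt(650) ~= 0.0784465), while eps = 0.25 under the
// root would give 2 / sqrt(650.25) = 2 / 25.5 ~= 0.0784314; that is
// consistent with the 4th-decimal mismatch noted above.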
NGRAPH_TEST(DISABLED_${BACKEND_NAME}, normalize_across_hw_w_scale)
{
Shape data_shape{1, 2, 3, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto scale = make_shared<op::Parameter>(element::f32, Shape{2});
bool across_spatial{true};
bool channel_shared{false};
float eps{0.25f};
auto normalize = make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data, scale});
auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");
vector<float> input_data(shape_size(data_shape));
iota(begin(input_data), end(input_data), 1);
test_case.add_input<float>(input_data);
test_case.add_input<float>({2.f, 3.f});
test_case.add_expected_output<float>(
data_shape, {0.07844646, 0.15689291, 0.23533936, 0.31378582, 0.39223227, 0.47067872,
0.5491252, 0.62757164, 0.7060181, 0.78446454, 0.862911, 0.94135743,
0.5982327, 0.64425063, 0.6902685, 0.7362864, 0.7823043, 0.8283222,
0.87434006, 0.920358, 0.9663758, 1.0123938, 1.0584116, 1.1044296});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, gemm)
{
auto A = make_shared<op::Parameter>(element::f64, Shape{3, 6});
......
......@@ -13969,6 +13969,131 @@ TEST(type_prop, group_conv_invalid_groups)
}
}
TEST(type_prop, normalize_invalid_input_tensor_rank)
{
Shape data_shape{1, 2, 3, 4, 5};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto scale = make_shared<op::Parameter>(element::f32, Shape{});
bool across_spatial{false};
bool channel_shared{true};
float eps{1e-6f};
try
{
auto normalize =
make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Input tensor rank must be 2, 3 or 4 dimensional"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
data = make_shared<op::Parameter>(element::f32, Shape{2});
try
{
auto normalize =
make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Input tensor rank must be 2, 3 or 4 dimensional"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, normalize_invalid_scale_rank)
{
Shape data_shape{1, 2, 3, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto scale = make_shared<op::Parameter>(element::f32, Shape{3});
bool across_spatial{false};
bool channel_shared{true};
float eps{1e-6f};
try
{
auto normalize =
make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Scale must be a scalar if 'channels_shared' "
"parameter is true"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
channel_shared = false;
try
{
auto normalize =
make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Scale must be a vector of size of input tensor "
"channels"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
data = make_shared<op::Parameter>(element::f32, Shape{4, 3});
try
{
auto normalize =
make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Scale must be a scalar if input tensor is of rank 2"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, normalize)
{
Shape data_shape{2, 3, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto scale = make_shared<op::Parameter>(element::f32, Shape{2});
bool across_spatial{false};
bool channel_shared{false};
float eps{1e-6f};
auto normalize = make_shared<op::Normalize>(data, scale, across_spatial, channel_shared, eps);
EXPECT_EQ(normalize->get_element_type(), element::f32);
EXPECT_EQ(normalize->get_shape(), (Shape{2, 3, 4}));
}
TEST(type_prop, function_revalidate_and_infer)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
......