Commit 56c3d47f authored by Adam Rogowiec's avatar Adam Rogowiec Committed by Scott Cyphers

Move ONNX lp-norm utility functions to core op/util. (#2881)

* Extend lp-norm functions to take bias.

* Move lp-norm utilities to nGraph core op/util.

* Move norm files to builder directory.

* Apply clang-format.

* Address review comments.

* Add using namespace std.

* Review comments.

* Fix clang errors.
parent 76fb19b0
......@@ -25,6 +25,8 @@ set (SRC
builder/autobroadcast.cpp
builder/autobroadcast.hpp
builder/make_constant.hpp
builder/norm.cpp
builder/norm.hpp
builder/numpy_transpose.cpp
builder/numpy_transpose.hpp
builder/quantization.cpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "norm.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/not_equal.hpp"
#include "ngraph/op/power.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/shape.hpp"
using namespace std;
namespace ngraph
{
    namespace builder
    {
        namespace detail
        {
            /// \brief Generic entrywise L-p norm: (sum(|x|^p) + bias)^(1/p).
            shared_ptr<Node> lp_norm(const shared_ptr<Node>& node,
                                     size_t p_norm,
                                     const AxisSet& reduction_axes,
                                     float bias)
            {
                // In general "entrywise" lp-norm for matrix `A` is defined as following double sum:
                // ||A||_p = ||vec(A)||_p = [sum_{i=1}^m sum_{j=1}^n abs(a_{i,j})^p]^{1/p}
                const Shape in_shape = node->get_shape();
                shared_ptr<Node> absolute = make_shared<op::Abs>(node);
                // Constant tensor filled with p, broadcast to the input shape.
                shared_ptr<Node> exponent = op::Constant::create(
                    node->get_element_type(),
                    in_shape,
                    vector<float>(shape_size(in_shape), static_cast<float>(p_norm)));

                // Inner part of the equation: abs(x)^p, reduced over the requested axes.
                shared_ptr<Node> reduced = make_shared<op::Sum>(
                    make_shared<op::Power>(absolute, exponent), reduction_axes);

                const Shape out_shape = reduced->get_shape();
                shared_ptr<Node> bias_const =
                    op::Constant::create(reduced->get_element_type(),
                                         out_shape,
                                         vector<float>(shape_size(out_shape), bias));
                reduced = reduced + bias_const;

                // Outer part of the equation: raise to the 1/p exponent.
                shared_ptr<Node> root_exponent =
                    op::Constant::create(reduced->get_element_type(),
                                         out_shape,
                                         vector<float>(shape_size(out_shape), 1.f / p_norm));
                return make_shared<op::Power>(reduced, root_exponent);
            }
        }

        shared_ptr<Node> l0_norm(const shared_ptr<Node>& node, const AxisSet& reduction_axes)
        {
            // The L-0 "norm" counts elements different from zero (not a true norm).
            const Shape in_shape = node->get_shape();
            shared_ptr<Node> zeros =
                op::Constant::create(node->get_element_type(),
                                     in_shape,
                                     vector<float>(shape_size(in_shape), 0.f));
            // (x != 0) yields booleans; convert back to the input element type so
            // that Sum can count them.
            shared_ptr<Node> non_zero_flags = make_shared<op::Convert>(
                make_shared<op::NotEqual>(node, zeros), node->get_element_type());
            return make_shared<op::Sum>(non_zero_flags, reduction_axes);
        }

        shared_ptr<Node>
            l1_norm(const shared_ptr<Node>& node, const AxisSet& reduction_axes, float bias)
        {
            // L-1 norm: sum of absolute values, plus the optional bias.
            shared_ptr<Node> sum_of_abs =
                make_shared<op::Sum>(make_shared<op::Abs>(node), reduction_axes);
            const Shape out_shape = sum_of_abs->get_shape();
            shared_ptr<Node> bias_const =
                op::Constant::create(sum_of_abs->get_element_type(),
                                     out_shape,
                                     vector<float>(shape_size(out_shape), bias));
            return sum_of_abs + bias_const;
        }

        shared_ptr<Node>
            l2_norm(const shared_ptr<Node>& node, const AxisSet& reduction_axes, float bias)
        {
            // L-2 (Euclidean) norm: sqrt(sum(x^2) + bias).
            shared_ptr<Node> sum_of_squares =
                make_shared<op::Sum>(node * node, reduction_axes);
            const Shape out_shape = sum_of_squares->get_shape();
            shared_ptr<Node> bias_const =
                op::Constant::create(sum_of_squares->get_element_type(),
                                     out_shape,
                                     vector<float>(shape_size(out_shape), bias));
            return make_shared<op::Sqrt>(sum_of_squares + bias_const);
        }

        shared_ptr<Node> lp_norm(const shared_ptr<Node>& node,
                                 const AxisSet& reduction_axes,
                                 size_t p_norm,
                                 float bias)
        {
            // Dispatch to the specialized graph builders for the common orders;
            // fall back to the generic construction otherwise.
            switch (p_norm)
            {
            // The number of non-zero elements (bias does not apply here).
            case 0: return l0_norm(node, reduction_axes);
            // Sum of absolute values.
            case 1: return l1_norm(node, reduction_axes, bias);
            // Sqrt of sum of squares - Euclidean norm.
            case 2: return l2_norm(node, reduction_axes, bias);
            // Generic case.
            default: return detail::lp_norm(node, p_norm, reduction_axes, bias);
            }
        }
    } // namespace builder
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cstddef>
#include <memory>
#include "ngraph/axis_set.hpp"
#include "ngraph/node.hpp"
namespace ngraph
{
    namespace builder
    {
        /// \brief Creates node which calculates L-0 norm of input tensor.
        ///
        /// \note The L-0 norm represents the cardinality of elements different
        ///       from zero. This actually is not a "true" norm.
        ///
        /// \param[in] node            The input tensor node.
        /// \param[in] reduction_axes  The axes along which we calculate norm.
        ///
        /// \return Node which calculates L-0 norm values.
        ///
        std::shared_ptr<Node> l0_norm(const std::shared_ptr<Node>& node,
                                      const AxisSet& reduction_axes);

        /// \brief Creates node which calculates L-1 norm of input tensor.
        ///
        /// \note The L-1 norm represents the sum of absolute values.
        ///
        /// \param[in] node            The input tensor node.
        /// \param[in] reduction_axes  The axes along which we calculate norm.
        /// \param[in] bias            The bias added to the calculated sum
        ///                            before the final result is produced.
        ///
        /// \return Node which calculates L-1 norm values.
        ///
        std::shared_ptr<Node> l1_norm(const std::shared_ptr<Node>& node,
                                      const AxisSet& reduction_axes,
                                      float bias = 0.f);

        /// \brief Calculates L-2 norm of input tensor.
        ///
        /// \note The L-2 norm represents the square root of sum of squares of each
        ///       individual element. The bias is added to the sum of squares
        ///       before taking the square root.
        ///
        /// \param[in] node            The input tensor node.
        /// \param[in] reduction_axes  The axes along which we calculate norm.
        /// \param[in] bias            The bias added to the calculated sum.
        ///
        /// \return Node which calculates L-2 norm values.
        ///
        std::shared_ptr<Node> l2_norm(const std::shared_ptr<Node>& node,
                                      const AxisSet& reduction_axes,
                                      float bias = 0.f);

        /// \brief Creates node which calculates L-p norm on input tensor.
        ///
        /// \note Dispatches to l0_norm/l1_norm/l2_norm for p_norm in {0, 1, 2};
        ///       otherwise builds the generic (sum(|x|^p) + bias)^(1/p) graph.
        ///       The bias is ignored when p_norm == 0.
        ///
        /// \param[in] node            The input nGraph tensor.
        /// \param[in] reduction_axes  The axes along which we calculate norm.
        /// \param[in] p_norm          The p norm to calculate.
        /// \param[in] bias            The bias added to the calculated sum.
        ///
        /// \return Node which calculates L-p norm.
        ///
        std::shared_ptr<Node> lp_norm(const std::shared_ptr<Node>& node,
                                      const AxisSet& reduction_axes,
                                      std::size_t p_norm = 2,
                                      float bias = 0.f);
    } // namespace builder
} // namespace ngraph
......@@ -185,8 +185,6 @@ add_library(onnx_import STATIC
utils/common.hpp
utils/convpool.cpp
utils/convpool.hpp
utils/norm.cpp
utils/norm.hpp
utils/reduction.cpp
utils/reduction.hpp
utils/reshape.cpp
......
......@@ -21,12 +21,12 @@
#include "exceptions.hpp"
#include "lp_pool.hpp"
#include "ngraph/axis_set.hpp"
#include "ngraph/builder/norm.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/util/reshape.hpp"
#include "ngraph/util.hpp"
#include "utils/common.hpp"
#include "utils/norm.hpp"
#include "utils/reshape.hpp"
namespace ngraph
......@@ -56,8 +56,8 @@ namespace ngraph
AxisSet reduction_axes{
common::get_monotonic_range<std::size_t>(orig_shape.size(), 2)};
slice =
norm::lp_norm(slice, reduction_axes, static_cast<std::size_t>(p_norm));
slice = ngraph::builder::lp_norm(
slice, reduction_axes, static_cast<std::size_t>(p_norm));
// output shape is all ones except N channel
Shape output_shape(orig_shape.size(), 1);
......
......@@ -14,10 +14,9 @@
// limitations under the License.
//*****************************************************************************
#include <cstddef> // std::size_t
#include <functional> // std::multiplies
#include <iterator> // std::begin, std::end
#include <numeric> // std::accumulate
#include <cstddef> // std::size_t
#include <iterator> // std::begin, std::end
#include <numeric> // std::accumulate
#include "ngraph/op/constant.hpp"
#include "ngraph/op/divide.hpp"
......
......@@ -16,9 +16,11 @@
#pragma once
#include <functional>
#include <memory>
#include "core/node.hpp"
#include "ngraph/builder/norm.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/exp.hpp"
......@@ -29,7 +31,6 @@
#include "ngraph/op/product.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/util/broadcasting.hpp"
#include "utils/norm.hpp"
#include "utils/reduction.hpp"
namespace ngraph
......@@ -98,8 +99,12 @@ namespace ngraph
///
inline NodeVector reduce_l1(const Node& node)
{
auto l1_norm_reduction = std::bind(ngraph::builder::l1_norm,
std::placeholders::_1,
std::placeholders::_2,
0.f);
return {reduction::make_ng_reduction_op(
node, node.get_ng_inputs().at(0), norm::l1_norm)};
node, node.get_ng_inputs().at(0), l1_norm_reduction)};
}
/// \brief Compute the L2 norm of the input tensor's element along the provided axes.
......@@ -115,8 +120,12 @@ namespace ngraph
///
inline NodeVector reduce_l2(const Node& node)
{
auto l2_norm_reduction = std::bind(ngraph::builder::l2_norm,
std::placeholders::_1,
std::placeholders::_2,
0.f);
return {reduction::make_ng_reduction_op(
node, node.get_ng_inputs().at(0), norm::l2_norm)};
node, node.get_ng_inputs().at(0), l2_norm_reduction)};
}
/// \brief Compute the maximum value of the input tensor's elements along the provided axes.
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "norm.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/not_equal.hpp"
#include "ngraph/op/power.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/shape.hpp"
#include "utils/common.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace norm
        {
            namespace detail
            {
                // Generic entrywise L-p norm: (sum over reduction_axes of |x|^p)^(1/p).
                std::shared_ptr<ngraph::Node> lp_norm(const std::shared_ptr<ngraph::Node>& node,
                                                      std::size_t p_norm,
                                                      const ngraph::AxisSet& reduction_axes)
                {
                    // |x| for every element.
                    std::shared_ptr<ngraph::Node> abs_values{
                        std::make_shared<ngraph::op::Abs>(node)};
                    // Constant tensor filled with p (as float), matching the input shape.
                    std::shared_ptr<ngraph::Node> p_node = ngraph::op::Constant::create(
                        node->get_element_type(),
                        node->get_shape(),
                        std::vector<float>(shape_size(node->get_shape()),
                                           static_cast<float>(p_norm)));
                    // Inner part of the equation: |x|^p, summed over the reduction axes.
                    std::shared_ptr<ngraph::Node> values =
                        std::make_shared<ngraph::op::Power>(abs_values, p_node);
                    values = std::make_shared<ngraph::op::Sum>(values, reduction_axes);
                    // Outer part of the equation: raise the sum to the 1/p exponent.
                    std::shared_ptr<ngraph::Node> inv_p_node = ngraph::op::Constant::create(
                        values->get_element_type(),
                        values->get_shape(),
                        std::vector<float>(shape_size(values->get_shape()), 1.f / p_norm));
                    return {std::make_shared<ngraph::op::Power>(values, inv_p_node)};
                }
            }

            // L-0 "norm": counts the elements different from zero (not a true norm).
            std::shared_ptr<ngraph::Node> l0_norm(const std::shared_ptr<ngraph::Node>& node,
                                                  const ngraph::AxisSet& reduction_axes)
            {
                std::shared_ptr<ngraph::Node> abs_values{std::make_shared<ngraph::op::Abs>(node)};
                std::shared_ptr<ngraph::Node> zero_node{ngraph::op::Constant::create(
                    node->get_element_type(),
                    node->get_shape(),
                    std::vector<float>(shape_size(node->get_shape()), 0.f))};
                // (|x| != 0) yields booleans; convert back to the input element type
                // so that Sum can count them.
                std::shared_ptr<ngraph::Node> non_zero_values =
                    std::make_shared<ngraph::op::Convert>(
                        std::make_shared<ngraph::op::NotEqual>(abs_values, zero_node),
                        abs_values->get_element_type());
                return std::make_shared<ngraph::op::Sum>(non_zero_values, reduction_axes);
            }

            // L-1 norm: sum of absolute values along the reduction axes.
            std::shared_ptr<ngraph::Node> l1_norm(const std::shared_ptr<ngraph::Node>& node,
                                                  const ngraph::AxisSet& reduction_axes)
            {
                return std::make_shared<ngraph::op::Sum>(std::make_shared<ngraph::op::Abs>(node),
                                                         reduction_axes);
            }

            // L-2 (Euclidean) norm: sqrt of sum of squares along the reduction axes.
            std::shared_ptr<ngraph::Node> l2_norm(const std::shared_ptr<ngraph::Node>& node,
                                                  const ngraph::AxisSet& reduction_axes)
            {
                // |x| * |x| == x * x for real inputs; abs keeps the construction uniform.
                std::shared_ptr<ngraph::Node> abs_values{std::make_shared<ngraph::op::Abs>(node)};
                return {std::make_shared<ngraph::op::Sqrt>(
                    std::make_shared<ngraph::op::Sum>(abs_values * abs_values, reduction_axes))};
            }

            // Dispatches to the specialized builders for p in {0, 1, 2};
            // falls back to the generic construction otherwise.
            std::shared_ptr<ngraph::Node> lp_norm(const std::shared_ptr<ngraph::Node>& node,
                                                  const ngraph::AxisSet& reduction_axes,
                                                  std::size_t p_norm)
            {
                // The number of non-zero elements
                if (p_norm == 0)
                {
                    return l0_norm(node, reduction_axes);
                }
                // sum of absolute values.
                else if (p_norm == 1)
                {
                    return l1_norm(node, reduction_axes);
                }
                // sqrt of sum of squares - Euclidean norm
                else if (p_norm == 2)
                {
                    return l2_norm(node, reduction_axes);
                }
                // generic case
                else
                {
                    return detail::lp_norm(node, p_norm, reduction_axes);
                }
            }
        } // namespace norm
    }     // namespace onnx_import
}         // namespace ngraph
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "ngraph/axis_set.hpp"
#include "ngraph/node.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace norm
        {
            /// \brief Calculates L-0 norm of input tensor.
            ///
            /// \note The L-0 norm represents the cardinality of elements different
            ///       from zero. This actually is not a "true" norm.
            ///
            /// \param[in] node            The input tensor node.
            /// \param[in] reduction_axes  The axes along which we calculate norm.
            ///
            /// \return Node with calculated L-0 norm values.
            ///
            std::shared_ptr<ngraph::Node> l0_norm(const std::shared_ptr<ngraph::Node>& node,
                                                  const ngraph::AxisSet& reduction_axes);

            /// \brief Calculates L-1 norm of input tensor.
            ///
            /// \note The L-1 norm represents the sum of absolute values.
            ///
            /// \param[in] node            The input tensor node.
            /// \param[in] reduction_axes  The axes along which we calculate norm.
            ///
            /// \return Node with calculated L-1 norm values.
            ///
            std::shared_ptr<ngraph::Node> l1_norm(const std::shared_ptr<ngraph::Node>& node,
                                                  const ngraph::AxisSet& reduction_axes);

            /// \brief Calculates L-2 norm of input tensor.
            ///
            /// \note The L-2 norm represents the square root of sum of squares of each
            ///       individual element.
            ///
            /// \param[in] node            The input tensor node.
            /// \param[in] reduction_axes  The axes along which we calculate norm.
            ///
            /// \return Node with calculated L-2 norm values.
            ///
            std::shared_ptr<ngraph::Node> l2_norm(const std::shared_ptr<ngraph::Node>& node,
                                                  const ngraph::AxisSet& reduction_axes);

            /// \brief Calculates L-p norm on input tensor.
            ///
            /// \note Dispatches to l0_norm/l1_norm/l2_norm for p_norm in {0, 1, 2};
            ///       otherwise builds the generic (sum(|x|^p))^(1/p) graph.
            ///
            /// \param[in] node            The input nGraph tensor.
            /// \param[in] reduction_axes  The axes along which we calculate norm.
            /// \param[in] p_norm          The p norm to calculate.
            ///
            /// \return Resulting L-p norm.
            ///
            std::shared_ptr<ngraph::Node> lp_norm(const std::shared_ptr<ngraph::Node>& node,
                                                  const ngraph::AxisSet& reduction_axes,
                                                  std::size_t p_norm = 2);
        } // namespace norm
    }     // namespace onnx_import
}         // namespace ngraph
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment