Unverified commit 54132cc9, authored by Mateusz Bencer, committed by GitHub

[ONNX] norm builders should produce v1 ops (#4354)

parent d28fac61
......@@ -26,6 +26,7 @@
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "ngraph/shape.hpp"
using namespace std;
......@@ -35,103 +36,112 @@ namespace ngraph
namespace builder
{
namespace detail
{
namespace opset1
{
shared_ptr<Node> lp_norm(const Output<Node>& value,
                         size_t p_norm,
                         const AxisSet& reduction_axes,
                         float bias)
{
    // In general "entrywise" lp-norm for matrix `A` is defined as following double
    // sum:
    // ||A||_p = ||vec(A)||_p = [sum_{i=1}^m sum_{j=1}^n abs(a_{i,j})^p]^{1/p}
    //
    // The whole sub-graph is built from opset1 (v1) ops so downstream passes see a
    // uniform opset.
    shared_ptr<Node> abs_values{make_shared<ngraph::opset1::Abs>(value)};
    // Scalar exponent `p`; broadcast against `abs_values` by the v1 Power op.
    shared_ptr<Node> p_node = ngraph::opset1::Constant::create(
        value.get_element_type(), Shape{}, {p_norm});

    // Get inner part of equation: abs_values^p_node, then sum over reduction_axes.
    shared_ptr<Node> values{make_shared<ngraph::opset1::Power>(abs_values, p_node)};
    // v1 ReduceSum takes the reduction axes as a second (i64 constant) input;
    // keep_dims=false drops the reduced axes, matching the v0 Sum behaviour.
    values = make_shared<ngraph::opset1::ReduceSum>(
        values,
        ngraph::opset1::Constant::create(
            element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
        false);

    // Scalar bias added to the reduced sum before taking the final root.
    shared_ptr<Node> bias_node{ngraph::opset1::Constant::create(
        values->get_element_type(), Shape{}, {bias})};

    values = make_shared<ngraph::opset1::Add>(values, bias_node);

    // Get outer part of equation: raise values to 1/p_norm exponent.
    shared_ptr<Node> inv_p_node = ngraph::opset1::Constant::create(
        values->get_element_type(), Shape{}, {1.f / p_norm});

    return {make_shared<ngraph::opset1::Power>(values, inv_p_node)
                ->add_provenance_group_members_above({value})};
}
}
}
shared_ptr<Node> builder::opset1::l0_norm(const Output<Node>& value,
                                          const AxisSet& reduction_axes)
{
    // L0 norm returns number of elements different from zero.
    // Built entirely from opset1 (v1) ops.
    const shared_ptr<Node> zero_node{
        ngraph::opset1::Constant::create(value.get_element_type(), Shape{}, {0.f})};

    // Convert bool values to input node data type.
    const shared_ptr<Node> non_zero_values = make_shared<ngraph::opset1::Convert>(
        make_shared<ngraph::opset1::NotEqual>(value, zero_node), value.get_element_type());

    // Count the non-zero entries by summing the 0/1 mask over reduction_axes;
    // keep_dims=false matches the legacy v0 Sum behaviour.
    return make_shared<ngraph::opset1::ReduceSum>(
               non_zero_values,
               ngraph::opset1::Constant::create(
                   element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
               false)
        ->add_provenance_group_members_above({value});
}
shared_ptr<Node> builder::opset1::l1_norm(const Output<Node>& value,
                                          const AxisSet& reduction_axes,
                                          float bias)
{
    // L1 norm: sum of absolute values over reduction_axes, built from opset1 (v1)
    // ops. keep_dims=false drops the reduced axes, matching the legacy v0 Sum.
    const shared_ptr<Node> values{make_shared<ngraph::opset1::ReduceSum>(
        make_shared<ngraph::opset1::Abs>(value),
        ngraph::opset1::Constant::create(
            element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
        false)};

    // Scalar bias; broadcast against `values` by the v1 Add op.
    const shared_ptr<Node> bias_node{
        ngraph::opset1::Constant::create(values->get_element_type(), Shape{}, {bias})};

    return make_shared<ngraph::opset1::Add>(values, bias_node)
        ->add_provenance_group_members_above({value});
}
shared_ptr<Node> builder::opset1::l2_norm(const Output<Node>& value,
                                          const AxisSet& reduction_axes,
                                          float bias,
                                          BiasMode bias_mode,
                                          bool keep_dims)
{
    // L2 norm: sqrt of the sum of squares over reduction_axes, built from opset1
    // (v1) ops. `keep_dims` controls whether the reduced axes are retained.
    shared_ptr<Node> values{make_shared<ngraph::opset1::ReduceSum>(
        make_shared<ngraph::opset1::Multiply>(value, value),
        ngraph::opset1::Constant::create(
            element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
        keep_dims)};

    // Scalar bias; how it is applied depends on `bias_mode` below.
    shared_ptr<Node> bias_node{
        ngraph::opset1::Constant::create(values->get_element_type(), Shape{}, {bias})};

    shared_ptr<Node> result;
    switch (bias_mode)
    {
    // sqrt(max(sum_of_squares, bias)) -- bias acts as a lower bound.
    case BiasMode::MAX:
    {
        result = make_shared<ngraph::opset1::Sqrt>(
            make_shared<ngraph::opset1::Maximum>(values, bias_node));
        break;
    }
    // sqrt(sum_of_squares + bias) -- the default behaviour.
    case BiasMode::ADD:
    default:
        result = make_shared<ngraph::opset1::Sqrt>(
            make_shared<ngraph::opset1::Add>(values, bias_node));
    }
    return result->add_provenance_group_members_above({value});
}
shared_ptr<Node> lp_norm(const Output<Node>& value,
shared_ptr<Node> builder::opset1::lp_norm(const Output<Node>& value,
const AxisSet& reduction_axes,
size_t p_norm,
float bias)
......@@ -139,22 +149,22 @@ namespace ngraph
// The number of non-zero elements
if (p_norm == 0)
{
return l0_norm(value, reduction_axes);
return opset1::l0_norm(value, reduction_axes);
}
// sum of absolute values.
else if (p_norm == 1)
{
return l1_norm(value, reduction_axes, bias);
return opset1::l1_norm(value, reduction_axes, bias);
}
// sqrt of sum of squares - Euclidean norm
else if (p_norm == 2)
{
return l2_norm(value, reduction_axes, bias);
return opset1::l2_norm(value, reduction_axes, bias);
}
// generic case
else
{
return detail::lp_norm(value, p_norm, reduction_axes, bias);
return detail::opset1::lp_norm(value, p_norm, reduction_axes, bias);
}
}
......
......@@ -34,6 +34,8 @@ namespace ngraph
MAX
};
namespace opset1
{
/// \brief Calculates L-0 norm of input tensor.
///
/// \note The L-0 norm represents the cardinality of elements different
......@@ -42,7 +44,7 @@ namespace ngraph
/// \param[in] value The input tensor.
/// \param[in] reduction_axes The axes along which we calculate norm.
///
/// \return L-0 norm of value.
/// \return L-0 norm of value. The output sub-graph is composed of v1 ops.
///
std::shared_ptr<Node> l0_norm(const Output<Node>& value, const AxisSet& reduction_axes);
......@@ -54,7 +56,7 @@ namespace ngraph
/// \param[in] reduction_axes The axes along which we calculate norm.
/// \param[in] bias The bias added to the calculated sum.
///
/// \return L-1 norm of value.
/// \return L-1 norm of value. The output sub-graph is composed of v1 ops.
///
std::shared_ptr<Node>
l1_norm(const Output<Node>& value, const AxisSet& reduction_axes, float bias = 0.f);
......@@ -70,7 +72,7 @@ namespace ngraph
/// \param[in] bias_mode The method of bias application.
/// \param[in] keep_dims The flag indicates if axes will be removed or kept.
///
/// \return L-2 norm of value.
/// \return L-2 norm of value. The output sub-graph is composed of v1 ops.
///
std::shared_ptr<Node> l2_norm(const Output<Node>& value,
const AxisSet& reduction_axes,
......@@ -85,11 +87,12 @@ namespace ngraph
/// \param[in] p_norm The p norm to calculate.
/// \param[in] bias The bias added to the calculated sum.
///
/// \return L-p norm of value.
/// \return L-p norm of value. The output sub-graph is composed of v1 ops.
///
std::shared_ptr<Node> lp_norm(const Output<Node>& value,
const AxisSet& reduction_axes,
std::size_t p_norm = 2,
float bias = 0.f);
}
} // namespace builder
} // namespace ngraph
......@@ -55,7 +55,7 @@ namespace ngraph
<< "Invalid `p` attribute value: " << p_norm
<< "Only normalization of 1st or 2nd order is supported.";
std::shared_ptr<ngraph::Node> norm = ngraph::builder::lp_norm(
std::shared_ptr<ngraph::Node> norm = ngraph::builder::opset1::lp_norm(
data, AxisSet{normalize_axis}, static_cast<std::size_t>(p_norm));
const auto target_shape = default_opset::Constant::create(
......
......@@ -55,7 +55,7 @@ namespace ngraph
AxisSet reduction_axes{
common::get_monotonic_range<std::size_t>(orig_shape.size(), 2)};
slice = ngraph::builder::lp_norm(
slice = ngraph::builder::opset1::lp_norm(
slice, reduction_axes, static_cast<std::size_t>(p_norm));
// output shape is all ones except N channel
......
......@@ -97,7 +97,7 @@ namespace ngraph
///
inline NodeVector reduce_l1(const Node& node)
{
auto l1_norm_reduction = std::bind(ngraph::builder::l1_norm,
auto l1_norm_reduction = std::bind(ngraph::builder::opset1::l1_norm,
std::placeholders::_1,
std::placeholders::_2,
0.f);
......@@ -119,7 +119,7 @@ namespace ngraph
///
inline NodeVector reduce_l2(const Node& node)
{
auto l2_norm_reduction = std::bind(ngraph::builder::l2_norm,
auto l2_norm_reduction = std::bind(ngraph::builder::opset1::l2_norm,
std::placeholders::_1,
std::placeholders::_2,
0.f,
......
......@@ -68,7 +68,7 @@ NodeVector op::GRN::decompose_op() const
}
// Calculate l2 norm across channels.
shared_ptr<Node> norm = builder::l2_norm(data, AxisSet{1}, m_bias);
shared_ptr<Node> norm = builder::opset1::l2_norm(data, AxisSet{1}, m_bias);
// Get back reduced axis.
norm = std::make_shared<Broadcast>(norm, data.get_shape(), AxisSet{1});
data = data / norm;
......
......@@ -96,7 +96,8 @@ NodeVector op::NormalizeL2::decompose_op() const
// Calculate l2 norm across axes determined by axes input
auto builder_bias_mode =
(m_eps_mode == EpsMode::MAX) ? builder::BiasMode::MAX : builder::BiasMode::ADD;
Output<Node> norm = builder::l2_norm(data, reduction_axes, m_eps, builder_bias_mode, true);
Output<Node> norm =
builder::opset1::l2_norm(data, reduction_axes, m_eps, builder_bias_mode, true);
data = make_shared<op::Divide>(data, norm, AutoBroadcastSpec(AutoBroadcastType::NUMPY));
......
......@@ -389,7 +389,7 @@ TEST(provenance, builder)
{
auto p1 = make_shared<op::Parameter>(element::i32, PartialShape{2, 3, 4});
p1->add_provenance_tag("P1");
auto norm = builder::lp_norm(p1, {0}, 1, 0);
auto norm = builder::opset1::lp_norm(p1, {0}, 1, 0);
norm->add_provenance_tag("norm");
for (auto node : topological_sort(NodeVector{norm}))
{
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment