Commit 4341c6ac authored by Adam Rogowiec's avatar Adam Rogowiec Committed by Robert Kimball

[ONNX] Reduce* operations (#1562)

* ReduceSum and ReduceSumSquare ONNX operations.

* Add new reduction ops.

- ReduceLogSum,
- ReduceLogSumExp,
- ReduceMax,
- ReduceMin,
- ReduceMean,
- ReduceProd.

* Add ReduceL1 and ReduceL2

* Utility generic functions generating monotonic sequences of values.

* Review comments: return AxisSet not std::vector

* Use common functions for generating monotonic sequence.

* Review comments.
parent 3609cc74
......@@ -62,6 +62,8 @@ add_library(onnx_import STATIC
op/not.hpp
op/or.hpp
op/pow.hpp
op/reduce.cpp
op/reduce.hpp
op/relu.hpp
op/reshape.cpp
op/reshape.hpp
......@@ -78,8 +80,10 @@ add_library(onnx_import STATIC
ops_bridge.hpp
utils/broadcasting.cpp
utils/broadcasting.hpp
utils/common.hpp
utils/convpool.cpp
utils/convpool.hpp
utils/reduction.hpp
utils/reshape.cpp
utils/reshape.hpp
utils/variadic.hpp)
......
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cstddef> // std::size_t
#include <functional> // std::multiplies
#include <iterator> // std::begin, std::end
#include <numeric> // std::accumulate
#include "ngraph/op/constant.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/shape.hpp"
#include "reduce.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
NodeVector reduce_mean(const Node& node)
{
    auto input_shape = node.get_ng_inputs().at(0)->get_shape();
    auto reduction_axes = reduction::detail::get_reduction_axes(node);
    // Number of input elements contributing to each output value: the product
    // of the input dimensions along every reduced axis.
    // BUGFIX: use std::size_t{1} instead of 1UL as the accumulate seed --
    // `unsigned long` is 32-bit on LLP64 platforms (Win64), so the accumulator
    // type would silently truncate the std::size_t product there.
    std::size_t elem_count_product =
        std::accumulate(std::begin(reduction_axes),
                        std::end(reduction_axes),
                        std::size_t{1},
                        [&input_shape](const std::size_t& a, const std::size_t& b) {
                            return a * input_shape.at(b);
                        });
    auto sum_node = reduction::make_ng_reduction_op<ngraph::op::Sum>(
        node, node.get_ng_inputs().at(0));
    // Scalar constant holding the divisor; the Constant converts the integral
    // value to the sum's element type.
    auto const_node = std::make_shared<ngraph::op::Constant>(
        sum_node->get_element_type(),
        Shape{},
        std::vector<std::size_t>{elem_count_product});
    // Broadcast the scalar divisor to the sum's shape (which already reflects
    // the node's keepdims attribute), then divide elementwise.
    auto broadcasted_const_node =
        make_broadcast_node(const_node, sum_node->get_shape());
    return {std::make_shared<ngraph::op::Divide>(sum_node, broadcasted_const_node)};
}
} // namespace op
} // namespace onnx_import
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "ngraph/node_vector.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/exp.hpp"
#include "ngraph/op/log.hpp"
#include "ngraph/op/max.hpp"
#include "ngraph/op/min.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/sum.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
#include "utils/reduction.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
/// \brief Compute the log sum of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_log_sum(const Node& node)
{
    const auto data = node.get_ng_inputs().at(0);
    const auto sum = reduction::make_ng_reduction_op<ngraph::op::Sum>(node, data);
    return {std::make_shared<ngraph::op::Log>(sum)};
}
/// \brief Compute the log sum exponent of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_log_sum_exp(const Node& node)
{
    const auto data = node.get_ng_inputs().at(0);
    const auto exponent = std::make_shared<ngraph::op::Exp>(data);
    const auto sum = reduction::make_ng_reduction_op<ngraph::op::Sum>(node, exponent);
    return {std::make_shared<ngraph::op::Log>(sum)};
}
/// \brief Compute the L1 norm of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_l1(const Node& node)
{
    // L1 norm = sum of absolute values along the reduction axes.
    const auto absolute = std::make_shared<ngraph::op::Abs>(node.get_ng_inputs().at(0));
    return {reduction::make_ng_reduction_op<ngraph::op::Sum>(node, absolute)};
}
/// \brief Compute the L2 norm of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_l2(const Node& node)
{
    // L2 norm = sqrt of the sum of squares along the reduction axes.
    const auto data = node.get_ng_inputs().at(0);
    const auto squared = std::make_shared<ngraph::op::Multiply>(data, data);
    const auto sum = reduction::make_ng_reduction_op<ngraph::op::Sum>(node, squared);
    return {std::make_shared<ngraph::op::Sqrt>(sum)};
}
/// \brief Compute the maximum value of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_max(const Node& node)
{
    const auto data = node.get_ng_inputs().at(0);
    return {reduction::make_ng_reduction_op<ngraph::op::Max>(node, data)};
}
/// \brief Compute the mean value of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input if the Node attribute
///     keepdims equals 1. If keepdims equals 0, then the output tensor has the
///     reduced dimensions pruned.
///
/// \param[in] node The ONNX node representing operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
// Unlike the other reductions above, this one is defined out-of-line
// (in reduce.cpp), where it divides the reduced sum by the element count.
NodeVector reduce_mean(const Node& node);
/// \brief Compute the minimum value of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_min(const Node& node)
{
    const auto data = node.get_ng_inputs().at(0);
    return {reduction::make_ng_reduction_op<ngraph::op::Min>(node, data)};
}
/// \brief Compute the product of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_prod(const Node& node)
{
    const auto data = node.get_ng_inputs().at(0);
    return {reduction::make_ng_reduction_op<ngraph::op::Product>(node, data)};
}
/// \brief Compute the sum of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_sum(const Node& node)
{
    const auto data = node.get_ng_inputs().at(0);
    return {reduction::make_ng_reduction_op<ngraph::op::Sum>(node, data)};
}
/// \brief Compute the sum of squares of the input tensor's elements along the provided axes.
///
/// \par Overview
///     The output tensor has the same rank as the input when the node's
///     keepdims attribute equals 1; when keepdims equals 0 the reduced
///     dimensions are pruned from the output.
///
/// \param[in] node The ONNX node representing the operation.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
inline NodeVector reduce_sum_square(const Node& node)
{
    const auto data = node.get_ng_inputs().at(0);
    const auto squared = std::make_shared<ngraph::op::Multiply>(data, data);
    return {reduction::make_ng_reduction_op<ngraph::op::Sum>(node, squared)};
}
} // namespace op
} // namespace onnx_import
} // namespace ngraph
......@@ -40,6 +40,7 @@
#include "op/not.hpp"
#include "op/or.hpp"
#include "op/pow.hpp"
#include "op/reduce.hpp"
#include "op/relu.hpp"
#include "op/reshape.hpp"
#include "op/softmax.hpp"
......@@ -116,6 +117,19 @@ namespace ngraph
m_map.emplace("Not", std::bind(op::logical_not, std::placeholders::_1));
m_map.emplace("Or", std::bind(op::logical_or, std::placeholders::_1));
m_map.emplace("Pow", std::bind(op::pow, std::placeholders::_1));
m_map.emplace("ReduceLogSum",
std::bind(op::reduce_log_sum, std::placeholders::_1));
m_map.emplace("ReduceLogSumExp",
std::bind(op::reduce_log_sum_exp, std::placeholders::_1));
m_map.emplace("ReduceL1", std::bind(op::reduce_l1, std::placeholders::_1));
m_map.emplace("ReduceL2", std::bind(op::reduce_l2, std::placeholders::_1));
m_map.emplace("ReduceMax", std::bind(op::reduce_max, std::placeholders::_1));
m_map.emplace("ReduceMean", std::bind(op::reduce_mean, std::placeholders::_1));
m_map.emplace("ReduceMin", std::bind(op::reduce_min, std::placeholders::_1));
m_map.emplace("ReduceProd", std::bind(op::reduce_prod, std::placeholders::_1));
m_map.emplace("ReduceSum", std::bind(op::reduce_sum, std::placeholders::_1));
m_map.emplace("ReduceSumSquare",
std::bind(op::reduce_sum_square, std::placeholders::_1));
m_map.emplace("Relu", std::bind(op::relu, std::placeholders::_1));
m_map.emplace("Reshape", std::bind(op::reshape, std::placeholders::_1));
m_map.emplace("Softmax", std::bind(op::softmax, std::placeholders::_1));
......
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <iterator> // std::begin, std::end
#include <type_traits> // std::enable_if, std::is_floating_point, std::is_integral
#include <vector>
namespace ngraph
{
namespace onnx_import
{
namespace common
{
namespace detail
{
    /// \brief Fill the range [first, last) with a monotonic sequence.
    ///
    /// \param[in] first      The iterator to the beginning of the range.
    /// \param[in] last       The iterator past the end of the range.
    /// \param[in] init_value The first value of the sequence.
    /// \param[in] step       The difference between consecutive values.
    ///
    /// \tparam ForwardIterator The forward iterator class type.
    /// \tparam T               The sequence value type.
    ///
    // NOTE: the unnamed namespace that previously wrapped this helper was
    // removed. An unnamed namespace in a header gives every translation unit
    // its own distinct copy of the entity (and of every template instantiated
    // from it), which bloats object code and is a well-known header
    // anti-pattern; a template in a named namespace already has the right
    // linkage.
    template <typename ForwardIterator, typename T>
    void fill_monotonic_range(ForwardIterator first,
                              ForwardIterator last,
                              T init_value,
                              T step)
    {
        for (; first != last; ++first, init_value += step)
        {
            *first = init_value;
        }
    }
} // namespace detail
/// \brief Return a vector holding a monotonic sequence of values.
///
/// \note Overload selected for integral types.
///
/// \param[in] end_value   The exclusive upper bound of the sequence.
/// \param[in] start_value The first value of the sequence (defaults to 0).
/// \param[in] step        The difference between consecutive values (defaults to 1).
///
/// \tparam T The data value type.
///
/// \return The vector with the monotonic sequence.
template <typename T,
          typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
std::vector<T> get_monotonic_range(T end_value, T start_value = T{0}, T step = T{1})
{
    // Integral division truncates, matching floor() semantics for the count.
    std::vector<T> range((end_value - start_value) / step);
    T value{start_value};
    for (auto& element : range)
    {
        element = value;
        value += step;
    }
    return range;
}
/// \brief Return a vector holding a monotonic sequence of values.
///
/// \note Overload selected for floating point types.
///
/// \param[in] end_value   The exclusive upper bound of the sequence.
/// \param[in] start_value The first value of the sequence (defaults to 0).
/// \param[in] step        The difference between consecutive values (defaults to 1).
///
/// \tparam T The data value type.
///
/// \return The vector with the monotonic sequence.
template <typename T,
          typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
std::vector<T> get_monotonic_range(T end_value, T start_value = T{0.f}, T step = T{1.f})
{
    // BUGFIX: the original used reinterpret_cast<std::size_t>(...) on the
    // floating point result of std::floor. reinterpret_cast cannot perform
    // value conversions between arithmetic types (it is ill-formed here);
    // a static_cast is the correct float-to-integer conversion.
    std::size_t value_count =
        static_cast<std::size_t>(std::floor((end_value - start_value) / step));
    std::vector<T> range(value_count);
    T value{start_value};
    for (auto& element : range)
    {
        element = value;
        value += step;
    }
    return range;
}
} // namespace common
} // namespace onnx_import
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cstddef> // std::size_t
#include <cstdint> // std::int64_t
#include <iterator> // std::begin, std::end
#include <memory> // std::make_shared
#include <numeric> // std::iota
#include <string>
#include <type_traits> // std::enable_if, std::is_base_of
#include <vector>
#include "ngraph/axis_set.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/shape.hpp"
#include "core/node.hpp"
#include "exceptions.hpp"
#include "utils/common.hpp"
#include "utils/reshape.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace reduction
{
namespace detail
{
    /// \brief Determine the set of axes an ONNX reduction applies to.
    ///
    /// Reads the node's "axes" attribute; when the attribute is absent or
    /// empty, every axis of the first input tensor is reduced, per the ONNX
    /// Reduce* operator defaults.
    inline AxisSet get_reduction_axes(const Node& node)
    {
        auto axes = node.get_attribute_value<std::vector<std::size_t>>("axes", {});
        if (axes.empty())
        {
            const auto input_rank = node.get_ng_inputs().at(0)->get_shape().size();
            axes = onnx_import::common::get_monotonic_range<std::size_t>(input_rank);
        }
        return AxisSet{axes};
    }
} // namespace detail
/// \brief Create an nGraph version of an ONNX reduction operation.
///
/// \param[in] node     The node representing incoming ONNX operation.
/// \param[in] ng_input The already-imported nGraph input tensor to reduce.
///
/// \tparam OnnxOperator Class of an nGraph ArithmeticReduction operation
///                      (e.g. Min, Max, Sum, Product).
///
/// \return nGraph node equivalent of the ONNX operation.
///
template <class OnnxOperator,
          typename std::enable_if<std::is_base_of<ngraph::op::util::ArithmeticReduction,
                                                  OnnxOperator>::value,
                                  int>::type = 0>
std::shared_ptr<ngraph::Node>
    make_ng_reduction_op(const Node& node,
                         const std::shared_ptr<ngraph::Node>& ng_input)
{
    auto data_shape = ng_input->get_shape();
    auto reduction_axes = detail::get_reduction_axes(node);
    // More reduction axes than input dimensions cannot be valid, since each
    // axis may be reduced at most once (reduction_axes is an AxisSet).
    if (reduction_axes.size() > data_shape.size())
    {
        throw error::parameter::Value(node.op_type(),
                                      node.get_name(),
                                      "provided reduction axes count (" +
                                          std::to_string(reduction_axes.size()) +
                                          ") is larger than input tensor rank (" +
                                          std::to_string(data_shape.size()) + ")");
    }
    auto op_node = std::make_shared<OnnxOperator>(ng_input, reduction_axes);
    // ONNX "keepdims" defaults to 1 (retain reduced dimensions as size 1).
    std::int64_t keepdims = node.get_attribute_value<std::int64_t>("keepdims", 1);
    if (keepdims == 0)
    {
        // The nGraph reduction op already prunes the reduced axes.
        return op_node;
    }
    // keepdims == 1: restore the input's rank by reshaping the reduced result
    // so that every reduced axis reappears with dimension 1.
    auto output_shape = data_shape;
    // flatten reduced axes and preserve original dimensions count.
    for (const auto& idx : reduction_axes)
    {
        output_shape.at(idx) = 1;
    }
    return std::make_shared<ngraph::op::Reshape>(
        op_node,
        reshape::get_default_axis_vector(op_node->get_shape().size()),
        Shape{output_shape});
}
} // namespace reduction
} // namespace onnx_import
} // namespace ngraph
......@@ -26,6 +26,7 @@
#include "ngraph/shape.hpp"
#include "exceptions.hpp"
#include "utils/common.hpp"
#include "utils/reshape.hpp"
namespace ngraph
......@@ -59,9 +60,8 @@ namespace ngraph
AxisVector get_default_axis_vector(std::size_t data_shape_size, std::size_t start_value)
{
    // Produces {start_value, start_value + 1, ..., start_value + data_shape_size - 1}
    // via the shared monotonic-sequence helper.
    // BUGFIX: removed the unreachable statements that preceded this return
    // (a leftover pre-refactor body left in place after the first return,
    // which made the old std::iota-based code dead).
    return AxisVector{
        common::get_monotonic_range<std::size_t>(data_shape_size, start_value)};
}
std::vector<std::size_t> infer_dimensions(const std::string& node_name,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment