Commit 82622ac8 authored by Mateusz Bencer's avatar Mateusz Bencer Committed by Michał Karzyński

[SPEC] Implement ReduceSum:v1 and ReduceProd:v1 (#3556)

parent dfb1476a
...@@ -252,6 +252,10 @@ set (SRC ...@@ -252,6 +252,10 @@ set (SRC
op/power.hpp op/power.hpp
op/product.cpp op/product.cpp
op/product.hpp op/product.hpp
op/reduce_prod.cpp
op/reduce_prod.hpp
op/reduce_sum.cpp
op/reduce_sum.hpp
op/quantize.cpp op/quantize.cpp
op/quantize.hpp op/quantize.hpp
op/quantized_convolution.cpp op/quantized_convolution.cpp
......
...@@ -178,6 +178,8 @@ namespace ngraph ...@@ -178,6 +178,8 @@ namespace ngraph
#include "ngraph/op/quantized_convolution.hpp" #include "ngraph/op/quantized_convolution.hpp"
#include "ngraph/op/quantized_dot.hpp" #include "ngraph/op/quantized_dot.hpp"
#include "ngraph/op/recv.hpp" #include "ngraph/op/recv.hpp"
#include "ngraph/op/reduce_prod.hpp"
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/relu.hpp" #include "ngraph/op/relu.hpp"
#include "ngraph/op/replace_slice.hpp" #include "ngraph/op/replace_slice.hpp"
#include "ngraph/op/reshape.hpp" #include "ngraph/op/reshape.hpp"
......
...@@ -20,27 +20,27 @@ ...@@ -20,27 +20,27 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
constexpr NodeTypeInfo op::Product::type_info; constexpr NodeTypeInfo op::v0::Product::type_info;
op::Product::Product(const Output<Node>& arg, const AxisSet& reduction_axes) op::v0::Product::Product(const Output<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction(arg, reduction_axes) : ArithmeticReduction(arg, reduction_axes)
{ {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();
} }
op::Product::Product(const Output<Node>& arg, const Output<Node>& reduction_axes) op::v0::Product::Product(const Output<Node>& arg, const Output<Node>& reduction_axes)
: ArithmeticReduction(arg, reduction_axes) : ArithmeticReduction(arg, reduction_axes)
{ {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();
} }
shared_ptr<Node> op::Product::copy_with_new_args(const NodeVector& new_args) const shared_ptr<Node> op::v0::Product::copy_with_new_args(const NodeVector& new_args) const
{ {
check_new_args_count(this, new_args); check_new_args_count(this, new_args);
return make_shared<Product>(new_args.at(0), get_reduction_axes()); return make_shared<op::v0::Product>(new_args.at(0), get_reduction_axes());
} }
shared_ptr<Node> op::Product::get_default_value() const shared_ptr<Node> op::v0::Product::get_default_value() const
{ {
return ngraph::make_constant_from_string("1", get_element_type(), get_shape()); return ngraph::make_constant_from_string("1", get_element_type(), get_shape());
} }
...@@ -22,33 +22,38 @@ namespace ngraph ...@@ -22,33 +22,38 @@ namespace ngraph
{ {
namespace op namespace op
{ {
/// \brief Product reduction operation. namespace v0
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the product.
class Product : public util::ArithmeticReduction
{ {
public: /// \brief Product reduction operation.
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Product", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a product reduction operation.
Product() = default;
/// \brief Constructs a product reduction operation.
/// ///
/// \param arg The tensor to be reduced. /// Reduces the tensor, eliminating the specified reduction axes by taking the product.
/// \param reduction_axes The axis positions (0-based) to be eliminated. class Product : public util::ArithmeticReduction
Product(const Output<Node>& arg, const AxisSet& reduction_axes); {
/// \brief Constructs a product reduction operation. public:
/// NGRAPH_API
/// \param arg The tensor to be reduced. static constexpr NodeTypeInfo type_info{"Product", 0};
/// \param reduction_axes The axis positions (0-based) to be eliminated. const NodeTypeInfo& get_type_info() const override { return type_info; }
Product(const Output<Node>& arg, const Output<Node>& reduction_axes); /// \brief Constructs a product reduction operation.
Product() = default;
/// \brief Constructs a product reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Product(const Output<Node>& arg, const AxisSet& reduction_axes);
/// \brief Constructs a product reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Product(const Output<Node>& arg, const Output<Node>& reduction_axes);
/// \return The default value for Product. /// \return The default value for Product.
virtual std::shared_ptr<Node> get_default_value() const override; virtual std::shared_ptr<Node> get_default_value() const override;
virtual std::shared_ptr<Node> virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override; copy_with_new_args(const NodeVector& new_args) const override;
}; };
}
// default opset version
using v0::Product;
} }
} }
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/reduce_prod.hpp"
#include "ngraph/graph_util.hpp"
using namespace std;
using namespace ngraph;
// Out-of-line definition for the static type descriptor declared in the class
// ({"Product", 1} — same type name as v0, distinguished by version).
constexpr NodeTypeInfo op::v1::ReduceProd::type_info;
/// \brief Constructs a product reduction operation.
///
/// \param arg            The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be reduced, supplied as a node.
/// \param keep_dims      If true, reduced axes are retained in the output with dimension 1.
op::v1::ReduceProd::ReduceProd(const Output<Node>& arg,
const Output<Node>& reduction_axes,
bool keep_dims)
: ArithmeticReduction(arg, reduction_axes)
, m_keep_dims{keep_dims}
{
constructor_validate_and_infer_types();
}
/// \return A constant holding the multiplicative identity ("1") with this
///         node's element type and shape — the default value for a product.
shared_ptr<Node> op::v1::ReduceProd::get_default_value() const
{
    const auto& element_type = get_element_type();
    const auto& shape = get_shape();
    return ngraph::make_constant_from_string("1", element_type, shape);
}
/// \brief Clones this node onto the supplied inputs, preserving keep_dims.
shared_ptr<Node> op::v1::ReduceProd::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    const auto& data = new_args.at(0);
    const auto& axes = new_args.at(1);
    return make_shared<ReduceProd>(data, axes, m_keep_dims);
}
// Shape/type inference for ReduceProd.
//
// With keep_dims == false, behavior is exactly the base class's: reduced axes
// are eliminated from the output shape.  With keep_dims == true, each reduced
// axis is retained with dimension 1, so output rank equals input rank.
void op::v1::ReduceProd::validate_and_infer_types()
{
if (m_keep_dims)
{
auto reduction_axes = get_reduction_axes();
auto input_shape = get_input_partial_shape(0);
auto input_rank = input_shape.rank();
// Default to a fully dynamic result unless both the input rank and the
// reduction axes are statically known.
PartialShape result_shape{PartialShape::dynamic()};
if (input_rank.is_static() && reduction_axes_constant())
{
std::vector<Dimension> dims;
// Validate every requested axis against the input rank first.
for (auto axis : reduction_axes)
{
NODE_VALIDATION_CHECK(this,
axis < size_t(input_rank),
"Reduction axis (",
axis,
") is out of bounds ",
"(argument shape: ",
input_shape,
", reduction axes: ",
reduction_axes,
")");
}
// Build the output shape: untouched axes keep their (possibly dynamic)
// dimension; reduced axes collapse to 1.
for (size_t i = 0; i < size_t(input_rank); i++)
{
if (reduction_axes.count(i) == 0)
{
dims.push_back(input_shape[i]);
}
else
{
dims.push_back(Dimension{1});
}
}
result_shape = PartialShape(dims);
}
// The axes input participates in shape inference (dynamic graphs).
set_input_is_relevant_to_shape(1);
set_output_type(0, get_input_element_type(0), result_shape);
}
else
{
ArithmeticReduction::validate_and_infer_types();
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/util/arithmetic_reduction.hpp"
namespace ngraph
{
namespace op
{
namespace v1
{
/// \brief Product reduction operation (opset v1).
///
/// Reduces the tensor over the specified reduction axes by taking the product.
/// Unlike v0 Product, reduced axes may optionally be kept (with dimension 1).
class ReduceProd : public util::ArithmeticReduction
{
public:
NGRAPH_API
// Same type name as v0 Product; the version field (1) distinguishes opsets.
static constexpr NodeTypeInfo type_info{"Product", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an uninitialized product reduction operation.
ReduceProd() = default;
/// \brief Constructs a product reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be reduced.
/// \param keep_dims If true, reduced axes are retained in the output with dimension 1.
ReduceProd(const Output<Node>& arg,
const Output<Node>& reduction_axes,
bool keep_dims = false);
void validate_and_infer_types() override;
size_t get_version() const override { return 1; }
/// \return true if reduced axes are retained in the output shape;
/// each such axis then has output dimension 1.
bool get_keep_dims() const { return m_keep_dims; }
/// \return The default value for Product (the multiplicative identity, 1).
virtual std::shared_ptr<Node> get_default_value() const override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
private:
// Whether reduced axes are kept (as dimension 1) in the output shape.
bool m_keep_dims;
};
}
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/broadcast.hpp"
using namespace std;
using namespace ngraph;
// Out-of-line definition for the static type descriptor declared in the class
// ({"Sum", 1} — same type name as v0, distinguished by version).
constexpr NodeTypeInfo op::v1::ReduceSum::type_info;
/// \brief Constructs a summation operation.
///
/// \param arg            The tensor to be summed.
/// \param reduction_axes The axis positions (0-based) to be reduced, supplied as a node.
/// \param keep_dims      If true, reduced axes are retained in the output with dimension 1.
op::v1::ReduceSum::ReduceSum(const Output<Node>& arg,
const Output<Node>& reduction_axes,
bool keep_dims)
: ArithmeticReduction(arg, reduction_axes)
, m_keep_dims{keep_dims}
{
constructor_validate_and_infer_types();
}
/// \return A constant holding the additive identity ("0") with this node's
///         element type and shape — the default value for a sum.
shared_ptr<Node> op::v1::ReduceSum::get_default_value() const
{
    const auto& element_type = get_element_type();
    const auto& shape = get_shape();
    return ngraph::make_constant_from_string("0", element_type, shape);
}
/// \brief Clones this node onto the supplied inputs, preserving keep_dims.
shared_ptr<Node> op::v1::ReduceSum::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    const auto& data = new_args.at(0);
    const auto& axes = new_args.at(1);
    return make_shared<ReduceSum>(data, axes, m_keep_dims);
}
// Backprop for a sum reduction: the adjoint of each input element is the
// incoming delta broadcast back across the reduced axes onto the input shape.
void op::v1::ReduceSum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
{
auto delta = deltas.at(0);
auto x = input_value(0);
// NOTE(review): get_shape() requires a static input shape here — dynamic
// shapes presumably cannot be differentiated through this op; confirm.
auto& x_shape = x.get_shape();
adjoints.add_delta(x, make_shared<op::Broadcast>(delta, x_shape, get_reduction_axes()));
}
// Shape/type inference for ReduceSum.
//
// With keep_dims == false, behavior is exactly the base class's: reduced axes
// are eliminated from the output shape.  With keep_dims == true, each reduced
// axis is retained with dimension 1, so output rank equals input rank.
void op::v1::ReduceSum::validate_and_infer_types()
{
if (m_keep_dims)
{
auto reduction_axes = get_reduction_axes();
auto input_shape = get_input_partial_shape(0);
auto input_rank = input_shape.rank();
// Default to a fully dynamic result unless both the input rank and the
// reduction axes are statically known.
PartialShape result_shape{PartialShape::dynamic()};
if (input_rank.is_static() && reduction_axes_constant())
{
std::vector<Dimension> dims;
// Validate every requested axis against the input rank first.
for (auto axis : reduction_axes)
{
NODE_VALIDATION_CHECK(this,
axis < size_t(input_rank),
"Reduction axis (",
axis,
") is out of bounds ",
"(argument shape: ",
input_shape,
", reduction axes: ",
reduction_axes,
")");
}
// Build the output shape: untouched axes keep their (possibly dynamic)
// dimension; reduced axes collapse to 1.
for (size_t i = 0; i < size_t(input_rank); i++)
{
if (reduction_axes.count(i) == 0)
{
dims.push_back(input_shape[i]);
}
else
{
dims.push_back(Dimension{1});
}
}
result_shape = PartialShape(dims);
}
// The axes input participates in shape inference (dynamic graphs).
set_input_is_relevant_to_shape(1);
set_output_type(0, get_input_element_type(0), result_shape);
}
else
{
ArithmeticReduction::validate_and_infer_types();
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/axis_set.hpp"
#include "ngraph/op/util/arithmetic_reduction.hpp"
namespace ngraph
{
namespace op
{
namespace v1
{
// clang-format off
/// \brief Tensor sum operation (opset v1).
///
/// Element-wise sums the input tensor, eliminating the specified reduction axes.
/// For example:
///
/// \f[
///     \mathit{sum}\left(\{0\},
///         \left[ \begin{array}{ccc}
///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
///     \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
///     \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
///     \mathit{sum}\left(\{1\},
///         \left[ \begin{array}{ccc}
///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
///     \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
///     \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
///     \mathit{sum}\left(\{0,1\},
///         \left[ \begin{array}{ccc}
///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
///      (1 + 2) + (3 + 4) + (5 + 6) =
///      21~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// ## Parameters
///
/// |                      | Description                                            |
/// | -------------------- | ------------------------------------------------------ |
/// | `reduction_axes`     | The axes to eliminate through summation.               |
/// | `keep_dims`          | If true, reduced axes are retained with dimension 1.   |
///
/// ## Inputs
///
/// |       | Type                              | Description                                            |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type                                      | Description                                                                                                      |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
// clang-format on
class ReduceSum : public util::ArithmeticReduction
{
public:
NGRAPH_API
// Same type name as v0 Sum; the version field (1) distinguishes opsets.
static constexpr NodeTypeInfo type_info{"Sum", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an uninitialized summation operation.
ReduceSum() = default;
/// \brief Constructs a summation operation.
///
/// \param arg The tensor to be summed.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
/// \param keep_dims If true, reduced axes are retained in the output with dimension 1.
ReduceSum(const Output<Node>& arg,
const Output<Node>& reduction_axes,
bool keep_dims = false);
void validate_and_infer_types() override;
size_t get_version() const override { return 1; }
/// \return true if reduced axes are retained in the output shape;
/// each such axis then has output dimension 1.
bool get_keep_dims() const { return m_keep_dims; }
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
/// \return The default value for Sum (the additive identity, 0).
virtual std::shared_ptr<Node> get_default_value() const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
private:
// Whether reduced axes are kept (as dimension 1) in the output shape.
bool m_keep_dims;
};
}
}
}
...@@ -21,15 +21,15 @@ ...@@ -21,15 +21,15 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
constexpr NodeTypeInfo op::Sum::type_info; constexpr NodeTypeInfo op::v0::Sum::type_info;
op::Sum::Sum(const Output<Node>& arg, const AxisSet& reduction_axes) op::v0::Sum::Sum(const Output<Node>& arg, const AxisSet& reduction_axes)
: ArithmeticReduction(arg, reduction_axes) : ArithmeticReduction(arg, reduction_axes)
{ {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();
} }
op::Sum::Sum(const Output<Node>& arg, const Output<Node>& reduction_axes) op::v0::Sum::Sum(const Output<Node>& arg, const Output<Node>& reduction_axes)
: ArithmeticReduction(arg, reduction_axes) : ArithmeticReduction(arg, reduction_axes)
{ {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();
...@@ -38,10 +38,10 @@ op::Sum::Sum(const Output<Node>& arg, const Output<Node>& reduction_axes) ...@@ -38,10 +38,10 @@ op::Sum::Sum(const Output<Node>& arg, const Output<Node>& reduction_axes)
shared_ptr<Node> op::Sum::copy_with_new_args(const NodeVector& new_args) const shared_ptr<Node> op::Sum::copy_with_new_args(const NodeVector& new_args) const
{ {
check_new_args_count(this, new_args); check_new_args_count(this, new_args);
return make_shared<Sum>(new_args.at(0), new_args.at(1)); return make_shared<op::v0::Sum>(new_args.at(0), new_args.at(1));
} }
void op::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) void op::v0::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
{ {
auto delta = deltas.at(0); auto delta = deltas.at(0);
...@@ -51,7 +51,7 @@ void op::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& ...@@ -51,7 +51,7 @@ void op::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector&
adjoints.add_delta(x, make_shared<op::Broadcast>(delta, x_shape, get_reduction_axes())); adjoints.add_delta(x, make_shared<op::Broadcast>(delta, x_shape, get_reduction_axes()));
} }
shared_ptr<Node> op::Sum::get_default_value() const shared_ptr<Node> op::v0::Sum::get_default_value() const
{ {
return ngraph::make_constant_from_string("0", get_element_type(), get_shape()); return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
} }
...@@ -24,82 +24,87 @@ namespace ngraph ...@@ -24,82 +24,87 @@ namespace ngraph
{ {
namespace op namespace op
{ {
// clang-format off namespace v0
/// \brief Tensor sum operation.
///
/// Element-wise sums the input tensor, eliminating the specified reduction axes.
/// For example:
///
/// \f[
/// \mathit{sum}\left(\{0\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{sum}\left(\{1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{sum}\left(\{0,1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
/// (1 + 2) + (3 + 4) + (5 + 6) =
/// 21~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | ---------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through summation. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
// clang-format off
class Sum : public util::ArithmeticReduction
{ {
public: // clang-format off
NGRAPH_API /// \brief Tensor sum operation.
static constexpr NodeTypeInfo type_info{"Sum", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a summation operation.
Sum() = default;
/// \brief Constructs a summation operation.
/// ///
/// \param arg The tensor to be summed. /// Element-wise sums the input tensor, eliminating the specified reduction axes.
/// \param reduction_axes The axis positions (0-based) to be eliminated. /// For example:
Sum(const Output<Node>& arg, const AxisSet& reduction_axes);
/// \brief Constructs a summation operation.
/// ///
/// \param arg The tensor to be summed. /// \f[
/// \param reduction_axes The axis positions (0-based) to be eliminated. /// \mathit{sum}\left(\{0\},
Sum(const Output<Node>& arg, const Output<Node>& reduction_axes); /// \left[ \begin{array}{ccc}
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{sum}\left(\{1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{sum}\left(\{0,1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
/// (1 + 2) + (3 + 4) + (5 + 6) =
/// 21~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | ---------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through summation. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
// clang-format off
class Sum : public util::ArithmeticReduction
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{ "Sum", 0 };
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a summation operation.
Sum() = default;
/// \brief Constructs a summation operation.
///
/// \param arg The tensor to be summed.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Sum(const Output<Node>& arg, const AxisSet& reduction_axes);
/// \brief Constructs a summation operation.
///
/// \param arg The tensor to be summed.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Sum(const Output<Node>& arg, const Output<Node>& reduction_axes);
virtual std::shared_ptr<Node> virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override; copy_with_new_args(const NodeVector& new_args) const override;
/// \return The default value for Sum. /// \return The default value for Sum.
virtual std::shared_ptr<Node> get_default_value() const override; virtual std::shared_ptr<Node> get_default_value() const override;
protected: protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints, virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override; const NodeVector& deltas) override;
}; };
}
// default opset version
using v0::Sum;
} }
} }
...@@ -19,7 +19,11 @@ ...@@ -19,7 +19,11 @@
#include "ngraph/op/gather.hpp" #include "ngraph/op/gather.hpp"
#include "ngraph/op/get_output_element.hpp" #include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/pad.hpp" #include "ngraph/op/pad.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/reduce_prod.hpp"
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/softmax.hpp" #include "ngraph/op/softmax.hpp"
#include "ngraph/op/sum.hpp"
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
...@@ -94,6 +98,24 @@ bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node) ...@@ -94,6 +98,24 @@ bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
modified = true; modified = true;
break; break;
} }
case OP_TYPEID::Product:
{
bool keep_dims = false;
auto replacement_node = make_shared<op::v1::ReduceProd>(
node->input(0).get_source_output(), node->input(1).get_source_output(), keep_dims);
replace_node(node, replacement_node);
modified = true;
break;
}
case OP_TYPEID::Sum:
{
bool keep_dims = false;
auto replacement_node = make_shared<op::v1::ReduceSum>(
node->input(0).get_source_output(), node->input(1).get_source_output(), keep_dims);
replace_node(node, replacement_node);
modified = true;
break;
}
case OP_TYPEID::Pad: case OP_TYPEID::Pad:
{ {
auto tmp = dynamic_cast<const op::v0::Pad*>(node.get()); auto tmp = dynamic_cast<const op::v0::Pad*>(node.get());
......
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
#include "ngraph/node.hpp" #include "ngraph/node.hpp"
#include "ngraph/op/gather.hpp" #include "ngraph/op/gather.hpp"
#include "ngraph/op/pad.hpp" #include "ngraph/op/pad.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/runtime/cpu/cpu_external_function.hpp" #include "ngraph/runtime/cpu/cpu_external_function.hpp"
#include "ngraph/runtime/cpu/cpu_tensor_view_wrapper.hpp" #include "ngraph/runtime/cpu/cpu_tensor_view_wrapper.hpp"
...@@ -77,7 +79,6 @@ namespace ngraph ...@@ -77,7 +79,6 @@ namespace ngraph
class Reshape; class Reshape;
class Sign; class Sign;
class Slice; class Slice;
class Sum;
class Exp; class Exp;
class EmbeddingLookup; class EmbeddingLookup;
class Sin; class Sin;
...@@ -130,7 +131,6 @@ namespace ngraph ...@@ -130,7 +131,6 @@ namespace ngraph
class AvgPoolBackprop; class AvgPoolBackprop;
class MaxPoolBackprop; class MaxPoolBackprop;
class MaxPoolWithIndicesBackprop; class MaxPoolWithIndicesBackprop;
class Product;
class Max; class Max;
class Erf; class Erf;
class Min; class Min;
......
...@@ -122,6 +122,8 @@ ...@@ -122,6 +122,8 @@
#include "ngraph/op/quantized_convolution.hpp" #include "ngraph/op/quantized_convolution.hpp"
#include "ngraph/op/quantized_dot.hpp" #include "ngraph/op/quantized_dot.hpp"
#include "ngraph/op/recv.hpp" #include "ngraph/op/recv.hpp"
#include "ngraph/op/reduce_prod.hpp"
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/relu.hpp" #include "ngraph/op/relu.hpp"
#include "ngraph/op/replace_slice.hpp" #include "ngraph/op/replace_slice.hpp"
#include "ngraph/op/reshape.hpp" #include "ngraph/op/reshape.hpp"
...@@ -1630,8 +1632,19 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js) ...@@ -1630,8 +1632,19 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
} }
case OP_TYPEID::Product: case OP_TYPEID::Product:
{ {
auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes")); if (op_version == 0)
node = make_shared<op::Product>(args[0], reduction_axes); {
auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
if (reduction_axes.empty())
node = make_shared<op::v0::Product>(args[0], args[1]);
else
node = make_shared<op::v0::Product>(args[0], reduction_axes);
}
if (op_version == 1)
{
auto keep_dims = node_js.at("keep_dims").get<bool>();
node = make_shared<op::v1::ReduceProd>(args[0], args[1], keep_dims);
}
break; break;
} }
case OP_TYPEID::Quantize: case OP_TYPEID::Quantize:
...@@ -1919,8 +1932,19 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js) ...@@ -1919,8 +1932,19 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
} }
case OP_TYPEID::Sum: case OP_TYPEID::Sum:
{ {
auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes")); if (op_version == 0)
node = make_shared<op::Sum>(args[0], reduction_axes); {
auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
if (reduction_axes.empty())
node = make_shared<op::v0::Sum>(args[0], args[1]);
else
node = make_shared<op::v0::Sum>(args[0], reduction_axes);
}
if (op_version == 1)
{
auto keep_dims = node_js.at("keep_dims").get<bool>();
node = make_shared<op::v1::ReduceSum>(args[0], args[1], keep_dims);
}
break; break;
} }
case OP_TYPEID::Tan: case OP_TYPEID::Tan:
...@@ -2751,8 +2775,15 @@ json JSONSerializer::serialize_node(const Node& n) ...@@ -2751,8 +2775,15 @@ json JSONSerializer::serialize_node(const Node& n)
} }
case OP_TYPEID::Product: case OP_TYPEID::Product:
{ {
auto tmp = dynamic_cast<const op::Product*>(&n); if (op_version == 0)
node["reduction_axes"] = serialize_axis_set(tmp->get_reduction_axes()); {
break;
}
if (op_version == 1)
{
auto tmp = dynamic_cast<const op::v1::ReduceProd*>(&n);
node["keep_dims"] = tmp->get_keep_dims();
}
break; break;
} }
case OP_TYPEID::Power: case OP_TYPEID::Power:
...@@ -2950,8 +2981,15 @@ json JSONSerializer::serialize_node(const Node& n) ...@@ -2950,8 +2981,15 @@ json JSONSerializer::serialize_node(const Node& n)
} }
case OP_TYPEID::Sum: case OP_TYPEID::Sum:
{ {
auto tmp = dynamic_cast<const op::Sum*>(&n); if (op_version == 0)
node["reduction_axes"] = serialize_axis_set(tmp->get_reduction_axes()); {
break;
}
if (op_version == 1)
{
auto tmp = dynamic_cast<const op::v1::ReduceSum*>(&n);
node["keep_dims"] = tmp->get_keep_dims();
}
break; break;
} }
case OP_TYPEID::Softmax: case OP_TYPEID::Softmax:
......
...@@ -69,6 +69,9 @@ set(SRC ...@@ -69,6 +69,9 @@ set(SRC
node_input_output.cpp node_input_output.cpp
nop_elimination.cpp nop_elimination.cpp
op.cpp op.cpp
opset_pass/sum_opset_pass.cpp
opset_pass/product_opset_pass.cpp
opset_pass/softmax_opset_pass.cpp
opset_pass/softmax_opset_pass.cpp opset_pass/softmax_opset_pass.cpp
opset_pass/gather_opset_pass.cpp opset_pass/gather_opset_pass.cpp
opset_pass/pad_opset_pass.cpp opset_pass/pad_opset_pass.cpp
...@@ -153,6 +156,8 @@ set(SRC ...@@ -153,6 +156,8 @@ set(SRC
type_prop/squared_difference.cpp type_prop/squared_difference.cpp
type_prop/squeeze.cpp type_prop/squeeze.cpp
type_prop/sum.cpp type_prop/sum.cpp
type_prop/reduce_prod.cpp
type_prop/reduce_sum.cpp
type_prop/tile.cpp type_prop/tile.cpp
type_prop/top_k.cpp type_prop/top_k.cpp
type_prop/transpose.cpp type_prop/transpose.cpp
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
// Verifies that the Opset1Upgrade pass replaces a v0 Product with a v1
// ReduceProd whose keep_dims flag defaults to false.
TEST(serialize, opset1_product_upgrade)
{
    // Build a function containing a v0 Product over axes {1, 2}.
    const auto param = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const AxisSet axes{1, 2};
    const auto prod_v0 = make_shared<op::Product>(param, axes);
    auto f = make_shared<Function>(ResultVector{make_shared<op::Result>(prod_v0)},
                                   ParameterVector{param});

    // Run the upgrade pass.
    ngraph::pass::Manager manager;
    manager.register_pass<pass::Opset1Upgrade>();
    manager.run_passes(f);

    // The result's producer must now be the v1 op with default keep_dims.
    const auto upgraded_node =
        f->get_result()->input(0).get_source_output().get_node_shared_ptr();
    const auto reduce_prod_v1 = static_pointer_cast<op::v1::ReduceProd>(upgraded_node);
    EXPECT_EQ(reduce_prod_v1->description(), "Product");
    EXPECT_EQ(reduce_prod_v1->get_version(), 1);
    EXPECT_EQ(reduce_prod_v1->get_keep_dims(), false);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(serialize, opset1_sum_upgrade)
{
    // Build a graph containing a single v0 Sum reduction over axes {1, 2}.
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const AxisSet reduction_axes{1, 2};
    const auto sum_v0 = make_shared<op::Sum>(data, reduction_axes);
    const auto result = make_shared<op::Result>(sum_v0);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});

    // The opset1 upgrade pass should replace Sum:v0 with ReduceSum:v1.
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset1Upgrade>();
    pass_manager.run_passes(f);

    const auto pass_replacement_node =
        f->get_result()->input(0).get_source_output().get_node_shared_ptr();
    // Fixed: cast to ReduceSum — the original cast to op::v1::ReduceProd was a
    // copy-paste error from the Product upgrade test.
    const auto reduce_sum_v1 = static_pointer_cast<op::v1::ReduceSum>(pass_replacement_node);

    // Upgraded node keeps the "Sum" description, reports version 1,
    // and defaults keep_dims to false.
    EXPECT_EQ(reduce_sum_v1->description(), "Sum");
    EXPECT_EQ(reduce_sum_v1->get_version(), 1);
    EXPECT_EQ(reduce_sum_v1->get_keep_dims(), false);
}
...@@ -376,6 +376,48 @@ TEST(serialize, opset1_gather) ...@@ -376,6 +376,48 @@ TEST(serialize, opset1_gather)
EXPECT_EQ(g_gather->get_version(), 1); EXPECT_EQ(g_gather->get_version(), 1);
} }
TEST(serialize, opset1_product)
{
    // Round-trip a ReduceProd:v1 node (keep_dims = true) through the serializer.
    const auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    const bool keep_dims = true;
    const auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);
    auto f = make_shared<Function>(ResultVector{make_shared<op::Result>(reduce_prod)},
                                   ParameterVector{arg});

    shared_ptr<Function> g = deserialize(serialize(f));
    const auto node = g->get_results().at(0)->input(0).get_source_output().get_node_shared_ptr();

    // All attributes must survive the serialize/deserialize round trip.
    EXPECT_EQ(node->description(), "Product");
    EXPECT_EQ(node->get_version(), 1);
    const auto* typed = dynamic_cast<const op::v1::ReduceProd*>(node.get());
    EXPECT_EQ(typed->get_keep_dims(), 1);
    EXPECT_EQ(typed->get_reduction_axes(), AxisSet({1, 2}));
}
TEST(serialize, opset1_sum)
{
    // Round-trip a ReduceSum:v1 node (keep_dims = true) through the serializer.
    const auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    const bool keep_dims = true;
    const auto reduce_sum = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
    auto f = make_shared<Function>(ResultVector{make_shared<op::Result>(reduce_sum)},
                                   ParameterVector{arg});

    shared_ptr<Function> g = deserialize(serialize(f));
    const auto node = g->get_results().at(0)->input(0).get_source_output().get_node_shared_ptr();

    // All attributes must survive the serialize/deserialize round trip.
    EXPECT_EQ(node->description(), "Sum");
    EXPECT_EQ(node->get_version(), 1);
    const auto* typed = dynamic_cast<const op::v1::ReduceSum*>(node.get());
    EXPECT_EQ(typed->get_keep_dims(), 1);
    EXPECT_EQ(typed->get_reduction_axes(), AxisSet({1, 2}));
}
TEST(serialize, opset1_pad) TEST(serialize, opset1_pad)
{ {
auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 5, 6}); auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 5, 6});
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, reduce_prod_v1_axis_out_of_range)
{
    // Axis 3 is out of range for a rank-3 input; node validation must reject it.
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const auto bad_axes =
        make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
    try
    {
        const auto node = make_shared<op::v1::ReduceProd>(data, bad_axes);
        // Should have thrown, so fail if it didn't
        FAIL() << "Incorrect axes values exception not thrown";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
TEST(type_prop, reduce_prod_v1_shape_if_keep_dims)
{
    // With keep_dims == true the reduced axes remain as size-1 dims: {3,4,5} -> {3,1,1}.
    const auto data = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    const auto reduction_axes =
        make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    const auto node = make_shared<op::v1::ReduceProd>(data, reduction_axes, /*keep_dims=*/true);
    ASSERT_TRUE(node->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(type_prop, reduce_prod_v1_shape_if_not_keep_dims)
{
    // With keep_dims == false the reduced axes are dropped: {3,4,5} -> {3}.
    const auto data = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    const auto reduction_axes =
        make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    const auto node = make_shared<op::v1::ReduceProd>(data, reduction_axes, /*keep_dims=*/false);
    ASSERT_TRUE(node->get_output_partial_shape(0).compatible(PartialShape{3}));
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, reduce_sum_v1_axis_out_of_range)
{
    // Axis 3 is out of range for a rank-3 input; node validation must reject it.
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const auto bad_axes =
        make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
    try
    {
        const auto node = make_shared<op::v1::ReduceSum>(data, bad_axes);
        // Should have thrown, so fail if it didn't
        FAIL() << "Incorrect axes values exception not thrown";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis ("));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
TEST(type_prop, reduce_sum_v1_shape_if_keep_dims)
{
    // With keep_dims == true the reduced axes remain as size-1 dims: {3,4,5} -> {3,1,1}.
    auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    auto keep_dims = true;
    // Renamed from 'reduce_prod' (copy-paste from the ReduceProd test) to match
    // the op actually under test.
    auto reduce_sum = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
    ASSERT_TRUE(reduce_sum->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(type_prop, reduce_sum_v1_shape_if_not_keep_dims)
{
    // With keep_dims == false the reduced axes are dropped: {3,4,5} -> {3}.
    auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
    auto keep_dims = false;
    // Renamed from 'reduce_prod' (copy-paste from the ReduceProd test) to match
    // the op actually under test.
    auto reduce_sum = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
    ASSERT_TRUE(reduce_sum->get_output_partial_shape(0).compatible(PartialShape{3}));
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment