Commit 9f2882a2 authored by Adam Procter, committed by Scott Cyphers

Sum operation (#196)

parent 3eaf7e6f
@@ -39,6 +39,7 @@ set (SRC
     ops/reshape.cpp
     ops/select.cpp
     ops/slice.cpp
+    ops/sum.cpp
     ops/tuple.cpp
     ops/unary_elementwise_arithmetic.cpp
     ops/unary_elementwise_builtin.cpp
...
@@ -88,6 +88,7 @@
 #include "ngraph/ops/sinh.hpp"
 #include "ngraph/ops/slice.hpp"
 #include "ngraph/ops/subtract.hpp"
+#include "ngraph/ops/sum.hpp"
 #include "ngraph/ops/tan.hpp"
 #include "ngraph/ops/tanh.hpp"
 #include "ngraph/ops/tuple.hpp"
...
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include "ngraph/ops/sum.hpp"
#include "ngraph/function.hpp"
using namespace std;
using namespace ngraph::op;
void Sum::propagate_types()
{
if (m_arguments.size() != 1)
{
throw ngraph_error("Wrong number of arguments.");
}
auto arg_type = m_arguments.at(0)->get_value_type();
if (nullptr == arg_type)
{
throw ngraph_error("Argument to sum is missing type.");
}
auto arg_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_type);
if (nullptr == arg_tensor_view_type)
{
throw ngraph_error("Argument to sum is not a tensor view");
}
auto& arg_element_type = arg_tensor_view_type->get_element_type();
if (arg_element_type == element::Bool::element_type())
{
throw ngraph_error("Argument for sum must have numeric element type");
}
auto arg_shape = arg_tensor_view_type->get_shape();
for (auto axis : m_summed_axes)
{
if (axis >= arg_shape.size())
{
throw ngraph_error("Summed axis is out of bounds");
}
}
Shape result_shape;
for (size_t i = 0; i < arg_shape.size(); i++)
{
if (m_summed_axes.count(i) == 0)
{
result_shape.push_back(arg_shape.at(i));
}
}
set_value_type_checked(
make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), result_shape));
}
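
As a worked illustration of the shape rule implemented above (a standalone sketch, not part of the commit): the axes listed in m_summed_axes are simply deleted from the argument shape, so summing a {3, 2} tensor over axis 0 yields shape {2}. Plain std:: containers stand in for ngraph's Shape and AxisSet here.

#include <cstddef>
#include <iostream>
#include <set>
#include <vector>

int main()
{
    std::vector<std::size_t> arg_shape{3, 2}; // stand-in for ngraph's Shape
    std::set<std::size_t> summed_axes{0};     // stand-in for ngraph's AxisSet

    // Same axis-pruning loop as in Sum::propagate_types above.
    std::vector<std::size_t> result_shape;
    for (std::size_t i = 0; i < arg_shape.size(); i++)
    {
        if (summed_axes.count(i) == 0)
        {
            result_shape.push_back(arg_shape.at(i));
        }
    }

    for (std::size_t d : result_shape)
    {
        std::cout << d << ' '; // prints: 2
    }
    std::cout << '\n';
}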
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once

#include "ngraph/ops/op.hpp"

namespace ngraph
{
    namespace op
    {
        class Sum : public Builtin
        {
        public:
            ///
            /// @param arg The tensor view to be summed.
            /// @param summed_axes The axis positions (0-based) to be summed.
            ///
            Sum(const std::shared_ptr<Node>& arg, const AxisSet& summed_axes)
                : Builtin({arg})
                , m_summed_axes(summed_axes)
            {
            }

            virtual std::string description() const override { return "Sum"; }
            virtual void propagate_types() override;

            const AxisSet& get_summed_axes() const { return m_summed_axes; }

        protected:
            AxisSet m_summed_axes;
        };
    }
}
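
For orientation, a sketch of how the new op is constructed, mirroring the tests later in this diff; the enclosing headers and using-declarations from this commit are assumed.

// Assumes the ngraph headers and using-declarations used elsewhere in this commit.
auto A = make_shared<op::Parameter>(element::Float32::element_type(), Shape{3, 2});
auto row_sums = make_shared<op::Sum>(A, AxisSet{1});    // result shape {3}
auto col_sums = make_shared<op::Sum>(A, AxisSet{0});    // result shape {2}
auto total    = make_shared<op::Sum>(A, AxisSet{0, 1}); // result shape {} (scalar)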
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once

#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace ngvm
        {
            namespace eigen
            {
                template <typename ET>
                class SumMatrixColumnsInstruction : public Instruction
                {
                public:
                    SumMatrixColumnsInstruction(const TensorViewInfo& arg,
                                                const TensorViewInfo& out)
                        : m_arg(arg)
                        , m_out(out)
                    {
                    }

                    virtual void execute(CallFrame& call_frame) const override
                    {
                        EigenVector<ET>(call_frame, m_out) =
                            EigenMatrix<ET>(call_frame, m_arg).colwise().sum();
                    }

                protected:
                    TensorViewInfo m_arg;
                    TensorViewInfo m_out;
                };
            }
        }
    }
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once

#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace ngvm
        {
            namespace eigen
            {
                template <typename ET>
                class SumMatrixRowsInstruction : public Instruction
                {
                public:
                    SumMatrixRowsInstruction(const TensorViewInfo& arg,
                                             const TensorViewInfo& out)
                        : m_arg(arg)
                        , m_out(out)
                    {
                    }

                    virtual void execute(CallFrame& call_frame) const override
                    {
                        EigenVector<ET>(call_frame, m_out) =
                            EigenMatrix<ET>(call_frame, m_arg).rowwise().sum();
                    }

                protected:
                    TensorViewInfo m_arg;
                    TensorViewInfo m_out;
                };
            }
        }
    }
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once

#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace ngvm
        {
            namespace eigen
            {
                template <typename ET>
                class SumToScalarInstruction : public Instruction
                {
                public:
                    SumToScalarInstruction(const TensorViewInfo& arg,
                                           const TensorViewInfo& out)
                        : m_arg(arg)
                        , m_out(out)
                    {
                    }

                    virtual void execute(CallFrame& call_frame) const override
                    {
                        EigenArray1d<ET>(call_frame, m_out) =
                            EigenArray1d<ET>(call_frame, m_arg).sum();
                    }

                protected:
                    TensorViewInfo m_arg;
                    TensorViewInfo m_out;
                };
            }
        }
    }
}
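
The three instructions above rely on Eigen's reduction conventions: colwise().sum() yields one partial sum per column (reducing over axis 0), rowwise().sum() one per row (axis 1), and sum() a full scalar reduction. A self-contained sketch using plain Eigen types rather than the commit's EigenMatrix/EigenVector wrappers, with the same values as the tests below:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::MatrixXf m(3, 2);
    m << 1, 2,
         3, 4,
         5, 6;

    std::cout << m.colwise().sum() << "\n";             // column sums (axis 0): 9 12
    std::cout << m.rowwise().sum().transpose() << "\n"; // row sums (axis 1): 3 7 11
    std::cout << m.sum() << "\n";                       // full reduction: 21
}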
@@ -56,6 +56,7 @@
 #include "ngraph/ops/sinh.hpp"
 #include "ngraph/ops/slice.hpp"
 #include "ngraph/ops/subtract.hpp"
+#include "ngraph/ops/sum.hpp"
 #include "ngraph/ops/tan.hpp"
 #include "ngraph/ops/tanh.hpp"
 #include "ngraph/ops/tuple.hpp"
@@ -105,6 +106,9 @@
 #include "ngraph/runtime/ngvm/eigen/sin.hpp"
 #include "ngraph/runtime/ngvm/eigen/sinh.hpp"
 #include "ngraph/runtime/ngvm/eigen/subtract.hpp"
+#include "ngraph/runtime/ngvm/eigen/sum_matrix_columns.hpp"
+#include "ngraph/runtime/ngvm/eigen/sum_matrix_rows.hpp"
+#include "ngraph/runtime/ngvm/eigen/sum_to_scalar.hpp"
 #include "ngraph/runtime/ngvm/eigen/tan.hpp"
 #include "ngraph/runtime/ngvm/eigen/tanh.hpp"
 #include "ngraph/runtime/ngvm/eigen/vector_slice.hpp"
@@ -824,6 +828,65 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
 }
 };
REGISTER_TO_OP_MAP(op::Sum)
{
    auto s = static_cast<const op::Sum*>(n);

    auto s_tensor_view_type =
        dynamic_pointer_cast<const TensorViewType>(s->get_value_type());
    assert(nullptr != s_tensor_view_type);
    auto& s_element_type = s_tensor_view_type->get_element_type();
    auto s_shape = s_tensor_view_type->get_shape();

    auto arg = s->get_arguments().at(0);
    auto arg_type = arg->get_value_type();
    auto arg_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_type);
    assert(nullptr != arg_tensor_view_type);
    auto arg_shape = arg_tensor_view_type->get_shape();
    auto arg_rank = arg_shape.size();

    auto& summed_axes = s->get_summed_axes();

    // Trivial case: no summed axes.
    if (summed_axes.size() == 0)
    {
        PUSH_POLYMORPHIC_INSTRUCTION(s_element_type,
                                     "Sum has unhandled element type",
                                     runtime::ngvm::eigen::CopyInstruction,
                                     in.at(0).get_index(),
                                     out.at(0).get_index());
    }
    // Full reduction? Then sum to scalar.
    else if ((arg_rank == 1 && summed_axes == AxisSet{0}) ||
             (arg_rank == 2 && summed_axes == AxisSet{0, 1}))
    {
        PUSH_POLYMORPHIC_INSTRUCTION(s_element_type,
                                     "Sum has unhandled element type",
                                     runtime::ngvm::eigen::SumToScalarInstruction,
                                     in[0],
                                     out[0]);
    }
    else if (arg_rank == 2 && summed_axes == AxisSet{1})
    {
        PUSH_POLYMORPHIC_INSTRUCTION(s_element_type,
                                     "Sum has unhandled element type",
                                     runtime::ngvm::eigen::SumMatrixRowsInstruction,
                                     in[0],
                                     out[0]);
    }
    else if (arg_rank == 2 && summed_axes == AxisSet{0})
    {
        PUSH_POLYMORPHIC_INSTRUCTION(s_element_type,
                                     "Sum has unhandled element type",
                                     runtime::ngvm::eigen::SumMatrixColumnsInstruction,
                                     in[0],
                                     out[0]);
    }
    else
    {
        throw ngraph_error("Sum: only vectors and matrices are currently supported");
    }
};
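
To summarize the dispatch above in one place (a descriptive note, not code from the commit):

// summed_axes == {}                 -> CopyInstruction (identity copy)
// rank 1, summed_axes == {0}        -> SumToScalarInstruction
// rank 2, summed_axes == {0, 1}     -> SumToScalarInstruction
// rank 2, summed_axes == {1}        -> SumMatrixRowsInstruction (row sums)
// rank 2, summed_axes == {0}        -> SumMatrixColumnsInstruction (column sums)
// any other rank/axes combination   -> ngraph_error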
REGISTER_TO_OP_MAP(op::Reshape)
{
    auto reshape = static_cast<const op::Reshape*>(n);
...
@@ -2274,3 +2274,207 @@ TEST(execute, tensor_constant_int64)
               std::strtol("1964", NULL, 10)}),
              result->get_vector());
}
// Trivial case with no summed axes.
TEST(execute, sum_trivial)
{
    auto shape = Shape{2, 2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{}), rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
    *a = vector<float>{1, 2, 3, 4};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape);

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{1, 2, 3, 4}), result->get_vector());
}

TEST(execute, sum_to_scalar)
{
    auto shape = Shape{2, 2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1}), rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
    *a = vector<float>{1, 2, 3, 4};
    auto result = ngraph::runtime::make_tensor<element::Float32>(Shape{});

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{10}), result->get_vector());

    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    ASSERT_EQ((vector<float>{1, 2, 3, 4}), a->get_vector());
}

TEST(execute, sum_matrix_columns)
{
    auto shape_a = Shape{3, 2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto shape_rt = Shape{2};
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{1, 2, 3, 4, 5, 6};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_rt);

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{9, 12}), result->get_vector());

    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6}), a->get_vector());
}

TEST(execute, sum_matrix_rows)
{
    auto shape_a = Shape{3, 2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto shape_rt = Shape{3};
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1}), rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{1, 2, 3, 4, 5, 6};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_rt);

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{3, 7, 11}), result->get_vector());

    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6}), a->get_vector());
}
TEST(execute, sum_matrix_rows_zero)
{
    auto shape_a = Shape{3, 0};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto shape_rt = Shape{3};
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1}), rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_rt);

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{0, 0, 0}), result->get_vector());

    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    ASSERT_EQ((vector<float>{}), a->get_vector());
}
TEST(execute, sum_matrix_cols_zero)
{
    // Summing a 0x2 matrix over axis 0 should yield a length-2 vector of zeros.
    auto shape_a = Shape{0, 2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto shape_rt = Shape{2};
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_rt);

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{0, 0}), result->get_vector());

    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    ASSERT_EQ((vector<float>{}), a->get_vector());
}
TEST(execute, sum_vector_zero)
{
    auto shape_a = Shape{0};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto shape_rt = Shape{};
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_rt);

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{0}), result->get_vector());

    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    ASSERT_EQ((vector<float>{}), a->get_vector());
}

TEST(execute, sum_matrix_to_scalar_zero_by_zero)
{
    auto shape_a = Shape{0, 0};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto shape_rt = Shape{};
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape_rt);
    auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1}), rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_rt);

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{0}), result->get_vector());

    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
    // input tensors, so let's do this too.
    ASSERT_EQ((vector<float>{}), a->get_vector());
}