Commit cdb079b8 authored by Adam Procter, committed by Scott Cyphers

Implement vector and matrix concatenation through VM (#147)

* Implement vector and matrix concatenation through VM

* Address PR comments

* Comments on Concat semantics

* Remove some redundant template parameters in concat_matrix and concat_vector

* Formatting fixes in external_function.cpp
parent d79361b4
@@ -21,5 +21,72 @@ using namespace ngraph::op;
void Concat::propagate_types()
{
if (m_arguments.size() < 1)
{
throw ngraph_error("At least one argument required");
}
auto arg0_type = m_arguments.at(0)->get_value_type();
if (nullptr == arg0_type)
{
throw ngraph_error("Argument to concat is missing type.");
}
auto arg0_tensor_view_type = dynamic_pointer_cast<TensorViewType>(arg0_type);
if (nullptr == arg0_tensor_view_type)
{
throw ngraph_error("Argument to concat is not a tensor view");
}
auto arg0_shape = arg0_tensor_view_type->get_shape();
if (m_concatenation_axis >= arg0_shape.size())
{
throw ngraph_error("Concatenation axis is out of bounds");
}
size_t concatenation_axis_length = arg0_shape.at(m_concatenation_axis);
auto& arg0_element_type = arg0_tensor_view_type->get_element_type();
for (size_t i = 1; i < m_arguments.size(); i++)
{
auto argi_type = m_arguments.at(i)->get_value_type();
if (nullptr == argi_type)
{
throw ngraph_error("Argument to concat is missing type.");
}
auto argi_tensor_view_type = dynamic_pointer_cast<TensorViewType>(argi_type);
if (nullptr == argi_tensor_view_type)
{
throw ngraph_error("Argument to concat is not a tensor view");
}
auto argi_shape = argi_tensor_view_type->get_shape();
if (argi_shape.size() != arg0_shape.size())
{
throw ngraph_error("Arguments to concat do not have same rank");
}
if (argi_tensor_view_type->get_element_type() != arg0_element_type)
{
throw ngraph_error("Argument element types do not match");
}
for (size_t j = 0; j < argi_shape.size(); j++)
{
if (j != m_concatenation_axis && arg0_shape.at(j) != argi_shape.at(j))
{
throw ngraph_error("Arguments to concat do not have same dimension on a non-concatenation axis");
}
else if (j == m_concatenation_axis)
{
concatenation_axis_length += argi_shape.at(j);
}
}
}
vector<size_t> concatenated_shape = arg0_shape;
concatenated_shape.at(m_concatenation_axis) = concatenation_axis_length;
set_value_type_checked(make_shared<TensorViewType>(arg0_element_type, concatenated_shape));
}
@@ -21,13 +21,28 @@ namespace ngraph
class Concat : public Builtin
{
public:
/// Concatenates one or more tensors.
///
/// All tensors must have the same rank, and the sizes of the axes must match
/// everywhere except at the concatenation axis. The size of the concatenation
/// axis on the output is the sum of its size on all inputs; the size of other
/// axes is unchanged from the input tensors.
///
/// Example: n0 has shape {2,4,2}, and n1 has shape {2,5,2}. Then the output of
/// Concat(Nodes{n0,n1},1) will have shape {2,9,2}.
Concat(const Nodes& args, size_t concatenation_axis)
: Builtin(args)
, m_concatenation_axis(concatenation_axis)
{
}
virtual std::string description() const override { return "Concatenate"; }
virtual void propagate_types() override;
size_t get_concatenation_axis() const { return m_concatenation_axis; }
protected:
const size_t m_concatenation_axis;
};
}
}
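A minimal usage sketch of the new constructor, mirroring the {2,4,2}/{2,5,2} example from the doc comment above; the Parameter and element::Float32 usage is assumed from the tests later in this change:

    auto n0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 4, 2});
    auto n1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 5, 2});
    auto concat = make_shared<op::Concat>(Nodes{n0, n1}, 1);
    concat->propagate_types(); // deduced value type: Float32 tensor view with Shape{2, 9, 2}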
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace eigen
{
// Intended substitutions for T are shared_ptr<ParameterizedTensorView<...>>
// and ParameterizedTensorView<...>*.
template <typename T>
void concat_matrix(std::vector<T>& args, T out, size_t axis)
{
auto mat_out = get_map_matrix_2d(&*out);
auto& out_shape = out->get_shape();
assert(out_shape.size() == 2);
assert(axis == 0 || axis == 1);
size_t concat_pos = 0;
for (T arg : args)
{
auto mat_arg = get_map_matrix_2d(&*arg);
auto& arg_shape = arg->get_shape();
assert(arg_shape.size() == 2);
if (axis == 0)
{
mat_out.block(concat_pos,0,arg_shape.at(0),arg_shape.at(1))
<< mat_arg;
concat_pos += arg_shape.at(0);
}
else
{
mat_out.block(0,concat_pos,arg_shape.at(0),arg_shape.at(1))
<< mat_arg;
concat_pos += arg_shape.at(1);
}
}
}
template <typename ET>
class ConcatMatrixInstruction : public Instruction
{
public:
ConcatMatrixInstruction(const std::vector<size_t>& args, size_t axis, size_t out)
: m_args(args)
, m_axis(axis)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
std::vector<ParameterizedTensorView<ET>*> ptvs;
for (size_t arg : m_args)
{
ptvs.push_back(call_frame.get_parameterized_tensor<ET>(arg));
}
runtime::eigen::concat_matrix(
ptvs,
call_frame.get_parameterized_tensor<ET>(m_out),
m_axis);
}
protected:
std::vector<size_t> m_args;
size_t m_axis;
size_t m_out;
};
}
}
}
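For intuition, here is a self-contained sketch (assuming only Eigen itself, outside the ngraph wrappers) of the block-assignment idiom concat_matrix uses above: each argument is copied into a sub-block of the output, advancing an offset along the concatenation axis.

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        // Concatenate a 2x2 and a 2x3 matrix along axis 1 (columns) into a 2x5 output.
        Eigen::MatrixXf a(2, 2), b(2, 3), out(2, 5);
        a << 1, 2,
             3, 4;
        b << 5, 6, 7,
             8, 9, 10;
        Eigen::Index concat_pos = 0;
        for (const Eigen::MatrixXf& arg : {a, b})
        {
            out.block(0, concat_pos, arg.rows(), arg.cols()) = arg;
            concat_pos += arg.cols(); // advance along the concatenation axis
        }
        std::cout << out << "\n"; // rows: [1 2 5 6 7] and [3 4 8 9 10]
    }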
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace eigen
{
// Intended substitutions for T are shared_ptr<ParameterizedTensorView<...>>
// and ParameterizedTensorView<...>*.
template <typename T>
void concat_vector(std::vector<T>& args, T out)
{
auto vec_out = get_map_matrix(&*out);
auto& out_shape = out->get_shape();
assert(out_shape.size() == 1);
size_t concat_pos = 0;
for (T arg : args)
{
auto vec_arg = get_map_matrix(&*arg);
auto& arg_shape = arg->get_shape();
assert(arg_shape.size() == 1);
vec_out.segment(concat_pos,arg_shape.at(0)) << vec_arg;
concat_pos += arg_shape.at(0);
}
}
template <typename ET>
class ConcatVectorInstruction : public Instruction
{
public:
ConcatVectorInstruction(const std::vector<size_t>& args, size_t out)
: m_args(args)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
std::vector<ParameterizedTensorView<ET>*> ptvs;
for (size_t arg : m_args)
{
ptvs.push_back(call_frame.get_parameterized_tensor<ET>(arg));
}
runtime::eigen::concat_vector(
ptvs,
call_frame.get_parameterized_tensor<ET>(m_out));
}
protected:
std::vector<size_t> m_args;
size_t m_out;
};
}
}
}
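Likewise, a self-contained sketch (again assuming only Eigen) of the segment idiom concat_vector uses, with the same values as the test_concat_vector test later in this diff:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::VectorXf a(4), b(6), c(2), out(12);
        a << 2, 4, 8, 16;
        b << 1, 2, 4, 8, 16, 32;
        c << 18, 19;
        Eigen::Index concat_pos = 0;
        for (const Eigen::VectorXf& arg : {a, b, c})
        {
            out.segment(concat_pos, arg.size()) = arg; // copy arg into its slot
            concat_pos += arg.size();
        }
        std::cout << out.transpose() << "\n"; // 2 4 8 16 1 2 4 8 16 32 18 19
    }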
@@ -24,6 +24,7 @@
#include "ngraph/node.hpp"
#include "ngraph/ops/abs.hpp"
#include "ngraph/ops/add.hpp"
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/divide.hpp"
#include "ngraph/ops/dot.hpp"
@@ -41,6 +42,8 @@
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/eigen/abs.hpp"
#include "ngraph/runtime/eigen/add.hpp"
#include "ngraph/runtime/eigen/concat_matrix.hpp"
#include "ngraph/runtime/eigen/concat_vector.hpp"
#include "ngraph/runtime/eigen/constant.hpp"
#include "ngraph/runtime/eigen/divide.hpp"
#include "ngraph/runtime/eigen/dot.hpp"
@@ -117,6 +120,34 @@ std::unordered_map<std::type_index,
REGISTER_TERNOP(op::Select, runtime::eigen::SelectInstruction<element::Float32>);
REGISTER_BINOP (op::Subtract,runtime::eigen::SubtractInstruction<element::Float32>);
op_map[type_index(typeid(op::Concat))] = [](Node* n,
ExternalFunction* ef,
const std::vector<size_t>& in,
const std::vector<size_t>& out) {
auto result_tensor_type =
dynamic_pointer_cast<TensorViewType>(n->get_value_type());
assert(nullptr != result_tensor_type);
auto result_shape = result_tensor_type->get_shape();
if (result_shape.size() == 1)
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::ConcatVectorInstruction<element::Float32>>(
in, out[0]));
}
else if (result_shape.size() == 2)
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::ConcatMatrixInstruction<element::Float32>>(
in, (dynamic_cast<op::Concat *>(n))->get_concatenation_axis(), out[0]));
}
else
{
throw ngraph_error("Concat not implemented for rank>2 in VM yet");
}
};
op_map[type_index(typeid(op::Dot))] = [](Node* n,
ExternalFunction* ef,
const std::vector<size_t>& in,
......
@@ -67,6 +67,105 @@ TEST(execute, test_abs)
ASSERT_EQ((vector<float>{1, 2, 0, 4.8f}), result->get_vector());
}
TEST(execute, test_concat_matrix_colwise)
{
auto shape_a = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto shape_b = Shape{2, 3};
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto shape_c = Shape{2, 3};
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{2, 8};
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},1), op::Parameters{A,B,C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{2, 4,
8, 16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{ 1, 2, 4,
8,16,32};
auto c = ngraph::runtime::make_tensor<element::Float32>(shape_c);
*c = vector<float>{ 2, 3, 5,
7,11,13};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b,c}, {result});
ASSERT_EQ((vector<float>{ 2, 4, 1, 2, 4, 2, 3, 5,
8, 16, 8, 16,32, 7, 11, 13}), result->get_vector());
}
TEST(execute, test_concat_matrix_rowwise)
{
auto shape_a = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto shape_b = Shape{3, 2};
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto shape_c = Shape{3, 2};
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{8, 2};
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},0), op::Parameters{A,B,C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{2, 4,
8, 16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{ 1, 2,
4, 8,
16,32};
auto c = ngraph::runtime::make_tensor<element::Float32>(shape_c);
*c = vector<float>{ 2, 3,
5, 7,
11,13};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b,c}, {result});
ASSERT_EQ((vector<float>{ 2, 4,
8, 16,
1, 2,
4, 8,
16, 32,
2, 3,
5, 7,
11, 13}), result->get_vector());
}
TEST(execute, test_concat_vector)
{
auto shape_a = Shape{4};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
auto shape_b = Shape{6};
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
auto shape_c = Shape{2};
auto C = make_shared<op::Parameter>(element::Float32::element_type(), shape_c);
auto shape_r = Shape{12};
auto f = make_shared<Function>(make_shared<op::Concat>(Nodes{A,B,C},0), op::Parameters{A,B,C});
auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
auto cf = external->make_call_frame();
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{2,4,8,16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{1,2,4,8,16,32};
auto c = ngraph::runtime::make_tensor<element::Float32>(shape_c);
*c = vector<float>{18,19};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b,c}, {result});
ASSERT_EQ((vector<float>{2,4,8,16,1,2,4,8,16,32,18,19}), result->get_vector());
}
TEST(execute, test_divide)
{
auto shape = Shape{2, 2};
......
@@ -94,12 +94,149 @@ TEST(type_prop, broadcast_bad_arguments)
}
}
TEST(type_prop, concat_deduce)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 4});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
c->propagate_types();
auto c_vt = c->get_value_type();
ASSERT_EQ(*c_vt, TensorViewType(element::Float32::element_type(), Shape{2, 12, 4}));
}
TEST(type_prop, concat_deduce_incorrect)
{
// Check deduced type against incorrectly specified type
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 4});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
c->set_value_type(
make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 14, 4}));
try
{
c->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Deduced type should disagree with specified type";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Setting value type to a different ValueType"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, concat_deduce_wrong_rank)
{
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
try
{
c->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect rank mismatch among concat arguments";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Arguments to concat do not have same rank"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, concat_deduce_wrong_shape)
{
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 5});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
try
{
c->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect dimension mismatch on a non-concatenation axis";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Arguments to concat do not have same dimension on a non-concatenation axis"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, concat_deduce_axis_oob)
{
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 5});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 3);
try
{
c->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect out-of-bounds concatenation axis";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Concatenation axis is out of bounds"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, concat_deduce_axis_barely_in_bounds)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 8});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 12});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 2);
c->propagate_types();
auto c_vt = c->get_value_type();
ASSERT_EQ(*c_vt, TensorViewType(element::Float32::element_type(), Shape{2, 3, 24}));
}
TEST(type_prop, concat_deduce_elem_type_mismatch)
{
auto param0 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 3, 4});
auto param1 = make_shared<op::Parameter>(element::Int32::element_type(), Shape{2, 7, 4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2, 4});
auto c = make_shared<op::Concat>(Nodes{param0,param1,param2}, 1);
try
{
c->propagate_types();
// Should have thrown, so fail if it didn't
FAIL() << "Did not detect element type mismatch among concat arguments";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Argument element types do not match"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
//
// Tests for dot product.
//
TEST(type_prop, dot_deduce_scalar_2d)
{
// Deduce type for scalar/matrix arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4,5});
auto bc = make_shared<op::Dot>(param1, param2);
@@ -110,7 +247,7 @@ TEST(type_prop, dot_deduce_scalar_2d)
TEST(type_prop, dot_deduce_2d_scalar)
{
// Deduce type for matrix/scalar arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4,5});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto bc = make_shared<op::Dot>(param1, param2);
@@ -121,7 +258,7 @@ TEST(type_prop, dot_deduce_2d_scalar)
TEST(type_prop, dot_deduce_scalar_scalar)
{
// Deduce type for scalar/scalar arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto bc = make_shared<op::Dot>(param1, param2);
@@ -132,7 +269,7 @@ TEST(type_prop, dot_deduce_scalar_scalar)
TEST(type_prop, dot_deduce_scalar_1d)
{
// Deduce type for scalar/vector arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{6});
auto bc = make_shared<op::Dot>(param1, param2);
@@ -143,7 +280,7 @@ TEST(type_prop, dot_deduce_scalar_1d)
TEST(type_prop, dot_deduce_1d)
{
// Deduce type for vector/vector arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4});
auto bc = make_shared<op::Dot>(param1, param2);
@@ -154,7 +291,7 @@ TEST(type_prop, dot_deduce_1d)
TEST(type_prop, dot_deduce_2d)
{
// Deduce type for matrix/matrix arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{4,2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2,3});
auto bc = make_shared<op::Dot>(param1, param2);
@@ -163,9 +300,9 @@ TEST(type_prop, dot_deduce_2d)
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{4,3}));
}
TEST(type_prop, dot_deduce_different_rank)
{
// Deduce type for different-rank tensor arguments
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2,8,4,2});
auto param2 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{1,2,3});
auto bc = make_shared<op::Dot>(param1, param2);
@@ -174,7 +311,7 @@ TEST(type_prop, dot_deduce_different_rank)
ASSERT_EQ(*bc_vt, TensorViewType(element::Float32::element_type(), Shape{2,8,4,1,3}));
}
TEST(type_prop, dot_deduce_different_rank_correct)
{
// Deduced type matches explicitly set type
auto param1 = make_shared<op::Parameter>(element::Float32::element_type(), Shape{2,8,4,2});
......