Commit d79361b4 authored by Adam Procter's avatar Adam Procter Committed by GitHub

Vector dot product, scalar-tensor product, matrix-vector product (#144)

* Vector dot product implementation

* Implement matrix mult instruction in VM

* Add implementations for scalar-tensor and matrix/vector products.
parent 866b7556
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace eigen
{
template <typename T>
void dot(T arg0, T arg1, T out)
{
    // Vector dot product: reduce the two 1-D operands to a single scalar
    // and store it in the first (only) element of the output tensor.
    auto lhs = get_map_matrix(&*arg0);
    auto rhs = get_map_matrix(&*arg1);
    (&*out)->get_vector()[0] = lhs.dot(rhs);
}
template <typename ET>
class DotInstruction : public Instruction
{
public:
DotInstruction(size_t arg0, size_t arg1, size_t out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::dot(
call_frame.get_parameterized_tensor<ET>(m_arg0),
call_frame.get_parameterized_tensor<ET>(m_arg1),
call_frame.get_parameterized_tensor<ET>(m_out));
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace eigen
{
template <typename T>
void matrix_mult(T arg0, T arg1, T out)
{
    // Matrix-matrix product: out = arg0 * arg1, with all three tensors
    // viewed as row-major 2-D Eigen matrices over their flat storage.
    auto lhs = get_map_matrix_2d(&*arg0);
    auto rhs = get_map_matrix_2d(&*arg1);
    set_map_matrix_2d(&*out, lhs * rhs);
}
template <typename ET>
class MatrixMultInstruction : public Instruction
{
public:
MatrixMultInstruction(size_t arg0, size_t arg1, size_t out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::matrix_mult(
call_frame.get_parameterized_tensor<ET>(m_arg0),
call_frame.get_parameterized_tensor<ET>(m_arg1),
call_frame.get_parameterized_tensor<ET>(m_out));
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace eigen
{
template <typename T>
void matrix_vector_product(T arg0, T arg1, T out)
{
    // Matrix-vector product: out = arg0 * arg1, where arg0 is viewed as a
    // row-major 2-D matrix and arg1/out as column vectors.
    auto mat = get_map_matrix_2d(&*arg0);
    auto vec = get_map_matrix(&*arg1);
    set_map_matrix(&*out, mat * vec);
}
template <typename ET>
class MatrixVectorProductInstruction : public Instruction
{
public:
MatrixVectorProductInstruction(size_t arg0, size_t arg1, size_t out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::matrix_vector_product(
call_frame.get_parameterized_tensor<ET>(m_arg0),
call_frame.get_parameterized_tensor<ET>(m_arg1),
call_frame.get_parameterized_tensor<ET>(m_out));
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
};
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace eigen
{
template <typename T>
void scalar_tensor_product(T arg0, T arg1, T out)
{
    // Scale every element of arg1 by the scalar stored in arg0's single
    // element; out and arg1 are viewed as flat column vectors.
    auto scale = (&*arg0)->get_vector()[0];
    set_map_matrix(&*out, scale * get_map_matrix(&*arg1));
}
template <typename ET>
class ScalarTensorProductInstruction : public Instruction
{
public:
ScalarTensorProductInstruction(size_t arg0, size_t arg1, size_t out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::scalar_tensor_product(
call_frame.get_parameterized_tensor<ET>(m_arg0),
call_frame.get_parameterized_tensor<ET>(m_arg1),
call_frame.get_parameterized_tensor<ET>(m_out));
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
};
}
}
}
......@@ -40,6 +40,70 @@ namespace ngraph
&v[0], v.size(), 1) = u;
}
template <typename T, typename U>
void set_map_matrix(std::shared_ptr<T>& t, const U& u)
{
    // Assign the Eigen expression u into t's flat storage, viewed as an
    // Eigen column vector (Matrix<_, Dynamic, 1>).
    using EigenVector = Eigen::Matrix<typename T::value_type, Eigen::Dynamic, 1>;
    auto& data = t->get_vector();
    Eigen::Map<EigenVector>(&data[0], data.size(), 1) = u;
}
template <typename T, typename U>
void set_map_matrix(T* t, const U& u)
{
auto& v = t->get_vector();
Eigen::Map<Eigen::Matrix<typename T::value_type, Eigen::Dynamic, 1>>(
&v[0], v.size(), 1) = u;
}
template <typename T, typename U>
void set_map_2d(std::shared_ptr<T>& t, const U& u)
{
    // Assign u into t's storage viewed as a row-major 2-D Eigen Array:
    // rows = first shape dimension, cols = product of the remaining dims.
    using EigenArray2d = Eigen::Array<typename T::value_type,
                                      Eigen::Dynamic, Eigen::Dynamic,
                                      Eigen::RowMajor>;
    auto& data = t->get_vector();
    auto& shape = t->get_shape();
    auto trailing = std::vector<size_t>(shape.begin() + 1, shape.end());
    Eigen::Map<EigenArray2d>(&data[0], shape[0], ngraph::shape_size(trailing)) = u;
}
template <typename T, typename U>
void set_map_2d(T* t, const U& u)
{
auto& v = t->get_vector();
auto& s = t->get_shape();
auto s_rest = std::vector<size_t>(s.begin() + 1, s.end());
Eigen::Map<Eigen::Array<typename T::value_type,
Eigen::Dynamic, Eigen::Dynamic,
Eigen::RowMajor>>(
&v[0], s[0], ngraph::shape_size(s_rest)) = u;
}
template <typename T, typename U>
void set_map_matrix_2d(std::shared_ptr<T>& t, const U& u)
{
    // Assign u into t's storage viewed as a row-major 2-D Eigen Matrix:
    // rows = first shape dimension, cols = product of the remaining dims.
    using EigenMatrix2d = Eigen::Matrix<typename T::value_type,
                                        Eigen::Dynamic, Eigen::Dynamic,
                                        Eigen::RowMajor>;
    auto& data = t->get_vector();
    auto& shape = t->get_shape();
    auto trailing = std::vector<size_t>(shape.begin() + 1, shape.end());
    Eigen::Map<EigenMatrix2d>(&data[0], shape[0], ngraph::shape_size(trailing)) = u;
}
template <typename T, typename U>
void set_map_matrix_2d(T* t, const U& u)
{
auto& v = t->get_vector();
auto& s = t->get_shape();
auto s_rest = std::vector<size_t>(s.begin() + 1, s.end());
Eigen::Map<Eigen::Matrix<typename T::value_type,
Eigen::Dynamic, Eigen::Dynamic,
Eigen::RowMajor>>(
&v[0], s[0], ngraph::shape_size(s_rest)) = u;
}
template <typename T>
Eigen::Map<Eigen::Array<typename T::value_type, Eigen::Dynamic, 1>>
get_map(std::shared_ptr<T>& arg)
......@@ -56,6 +120,73 @@ namespace ngraph
return Eigen::Map<Eigen::Array<typename T::value_type, Eigen::Dynamic, 1>>(
&v[0], v.size(), 1);
}
template <typename T>
Eigen::Map<Eigen::Matrix<typename T::value_type, Eigen::Dynamic, 1>>
get_map_matrix(std::shared_ptr<T>& arg)
{
    // Non-copying view of arg's flat storage as an Eigen column vector.
    auto& data = arg->get_vector();
    return Eigen::Map<Eigen::Matrix<typename T::value_type, Eigen::Dynamic, 1>>(
        &data[0], data.size(), 1);
}
template <typename T>
Eigen::Map<Eigen::Matrix<typename T::value_type, Eigen::Dynamic, 1>> get_map_matrix(T* arg)
{
    // Raw-pointer overload: non-copying column-vector view of arg's storage.
    auto& data = arg->get_vector();
    return Eigen::Map<Eigen::Matrix<typename T::value_type, Eigen::Dynamic, 1>>(
        &data[0], data.size(), 1);
}
template <typename T>
Eigen::Map<Eigen::Array<typename T::value_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
get_map_2d(std::shared_ptr<T>& arg)
{
    // Non-copying row-major 2-D Array view of arg's storage:
    // rows = first shape dimension, cols = product of the remaining dims.
    auto& data = arg->get_vector();
    auto& shape = arg->get_shape();
    auto trailing = std::vector<size_t>(shape.begin() + 1, shape.end());
    return Eigen::Map<Eigen::Array<typename T::value_type,
                                   Eigen::Dynamic, Eigen::Dynamic,
                                   Eigen::RowMajor>>(
        &data[0], shape[0], ngraph::shape_size(trailing));
}
template <typename T>
Eigen::Map<Eigen::Array<typename T::value_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> get_map_2d(T* arg)
{
    // Raw-pointer overload: non-copying row-major 2-D Array view
    // (rows = shape[0], cols = product of remaining dims).
    auto& data = arg->get_vector();
    auto& shape = arg->get_shape();
    auto trailing = std::vector<size_t>(shape.begin() + 1, shape.end());
    return Eigen::Map<Eigen::Array<typename T::value_type,
                                   Eigen::Dynamic, Eigen::Dynamic,
                                   Eigen::RowMajor>>(
        &data[0], shape[0], ngraph::shape_size(trailing));
}
/// Non-copying row-major 2-D Matrix view of arg's storage:
/// rows = first shape dimension, cols = product of the remaining dims.
/// This is the shared_ptr counterpart of get_map_matrix_2d(T*) below,
/// completing the get_map_2d (Array) / get_map_matrix_2d (Matrix) pairing.
template <typename T>
Eigen::Map<Eigen::Matrix<typename T::value_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
get_map_matrix_2d(std::shared_ptr<T>& arg)
{
    auto& v = arg->get_vector();
    auto& s = arg->get_shape();
    auto s_rest = std::vector<size_t>(s.begin() + 1, s.end());
    return Eigen::Map<Eigen::Matrix<typename T::value_type,
                                    Eigen::Dynamic, Eigen::Dynamic,
                                    Eigen::RowMajor>>(
        &v[0], s[0], ngraph::shape_size(s_rest));
}

/// Deprecated misnomer kept for backward compatibility: despite the name it
/// returns a *Matrix* (not Array) map. Prefer get_map_matrix_2d.
template <typename T>
Eigen::Map<Eigen::Matrix<typename T::value_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
get_map_array_2d(std::shared_ptr<T>& arg)
{
    return get_map_matrix_2d(arg);
}
template <typename T>
Eigen::Map<Eigen::Matrix<typename T::value_type,Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> get_map_matrix_2d(T* arg)
{
    // Raw-pointer overload: non-copying row-major 2-D Matrix view
    // (rows = shape[0], cols = product of remaining dims).
    auto& data = arg->get_vector();
    auto& shape = arg->get_shape();
    auto trailing = std::vector<size_t>(shape.begin() + 1, shape.end());
    return Eigen::Map<Eigen::Matrix<typename T::value_type,
                                    Eigen::Dynamic, Eigen::Dynamic,
                                    Eigen::RowMajor>>(
        &data[0], shape[0], ngraph::shape_size(trailing));
}
}
}
}
......@@ -26,6 +26,7 @@
#include "ngraph/ops/add.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/divide.hpp"
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/equal.hpp"
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/log.hpp"
......@@ -42,14 +43,18 @@
#include "ngraph/runtime/eigen/add.hpp"
#include "ngraph/runtime/eigen/constant.hpp"
#include "ngraph/runtime/eigen/divide.hpp"
#include "ngraph/runtime/eigen/dot.hpp"
#include "ngraph/runtime/eigen/equal.hpp"
#include "ngraph/runtime/eigen/less_than.hpp"
#include "ngraph/runtime/eigen/log.hpp"
#include "ngraph/runtime/eigen/matrix_mult.hpp"
#include "ngraph/runtime/eigen/matrix_vector_product.hpp"
#include "ngraph/runtime/eigen/maximum.hpp"
#include "ngraph/runtime/eigen/multiply.hpp"
#include "ngraph/runtime/eigen/negate.hpp"
#include "ngraph/runtime/eigen/not_equal.hpp"
#include "ngraph/runtime/eigen/return.hpp"
#include "ngraph/runtime/eigen/scalar_tensor_product.hpp"
#include "ngraph/runtime/eigen/select.hpp"
#include "ngraph/runtime/eigen/subtract.hpp"
#include "ngraph/runtime/utils.hpp"
......@@ -112,7 +117,71 @@ std::unordered_map<std::type_index,
REGISTER_TERNOP(op::Select, runtime::eigen::SelectInstruction<element::Float32>);
REGISTER_BINOP (op::Subtract,runtime::eigen::SubtractInstruction<element::Float32>);
// Parameter, as a "runtime no-op", is a special case.
// Emitter for op::Dot: dispatches on the ranks of the two argument tensors
// to one of four specialized instructions (scalar-tensor, vector dot,
// matrix-vector, matrix-matrix). Scalar checks must come first so that a
// rank-0 operand is never mistaken for a degenerate vector/matrix case.
// NOTE(review): only element::Float32 is emitted here, matching the
// REGISTER_* macros above — confirm other element types are out of scope.
op_map[type_index(typeid(op::Dot))] = [](Node* n,
                                         ExternalFunction* ef,
                                         const std::vector<size_t>& in,
                                         const std::vector<size_t>& out) {
    auto& arg_nodes = n->get_arguments();

    // Dot is strictly binary.
    assert(arg_nodes.size() == 2);

    // Both arguments must carry tensor-view types; their shapes drive dispatch.
    auto arg0_tensor_type =
        dynamic_pointer_cast<TensorViewType>(arg_nodes.at(0)->get_value_type());
    assert(nullptr != arg0_tensor_type);

    auto arg1_tensor_type =
        dynamic_pointer_cast<TensorViewType>(arg_nodes.at(1)->get_value_type());
    assert(nullptr != arg1_tensor_type);

    auto arg0_shape = arg0_tensor_type->get_shape();
    auto arg1_shape = arg1_tensor_type->get_shape();

    // If arg0 is a scalar (rank 0), emit a scalar-tensor product.
    if(arg0_shape.size() == 0)
    {
        ef->get_instructions()->push_back(
            make_shared<runtime::eigen::ScalarTensorProductInstruction<element::Float32>>(
                in[0], in[1], out[0]));
    }
    else if(arg1_shape.size() == 0)
    {
        // If arg1 is the scalar, do the same thing but switch the order of
        // operands so the scalar is always the instruction's first input.
        ef->get_instructions()->push_back(
            make_shared<runtime::eigen::ScalarTensorProductInstruction<element::Float32>>(
                in[1], in[0], out[0]));
    }
    // If arg0 and arg1 are both vectors (rank 1), emit a dot product.
    else if(arg0_shape.size() == 1 && arg1_shape.size() == 1)
    {
        ef->get_instructions()->push_back(
            make_shared<runtime::eigen::DotInstruction<element::Float32>>(
                in[0], in[1], out[0]));
    }
    // If arg0 is a matrix (rank 2) and arg1 is a vector, emit a
    // matrix-vector product.
    else if(arg0_shape.size() == 2 && arg1_shape.size() == 1)
    {
        ef->get_instructions()->push_back(
            make_shared<runtime::eigen::MatrixVectorProductInstruction<element::Float32>>(
                in[0], in[1], out[0]));
    }
    // If arg0 and arg1 are both matrices, emit a matrix product.
    else if(arg0_shape.size() == 2 && arg1_shape.size() == 2)
    {
        ef->get_instructions()->push_back(
            make_shared<runtime::eigen::MatrixMultInstruction<element::Float32>>(
                in[0], in[1], out[0]));
    }
    else
    {
        // Higher-rank tensor contraction is not supported by these kernels.
        throw ngraph_error("Dot product for tensors with rank>2 not implemented yet.");
    }
};
// Parameter is a "runtime no-op" because the output tensor has already been filled.
op_map[type_index(typeid(op::Parameter))] = [](Node* n,
ExternalFunction* ef,
const std::vector<size_t>& in,
......
......@@ -44,6 +44,7 @@ namespace ngraph
public:
// Allocate zero-initialized flat storage sized to the shape's element
// count, and record the shape so 2-D Eigen views can recover dimensions.
ParameterizedTensorView(const ngraph::Shape& shape)
    : m_vector(ngraph::shape_size(shape), 0)
    , m_shape(shape)
{
}
......@@ -73,9 +74,11 @@ namespace ngraph
// Mutable access to the flat element storage.
storage_type& get_vector() { return m_vector; }
// Shape recorded at construction; gives the tensor's dimensions.
const ngraph::Shape& get_shape() const { return m_shape; }
protected:
    storage_type m_vector; // flat element storage (row-major layout assumed by the Eigen views — TODO confirm)
    ngraph::Shape m_shape; // tensor dimensions, fixed at construction
};
}
}
......@@ -109,6 +109,53 @@ TEST(execute, test_equal)
ASSERT_EQ((vector<char>{1, 1, 0, 0, 0, 1, 1, 0}), result->get_vector());
}
// Dot product of two length-4 vectors.
// NOTE(review): the result shape is {1} rather than the scalar shape {};
// DotInstruction writes element [0], so {1} works — confirm this is intended.
TEST(execute, test_dot1d)
{
    auto shape = Shape{4};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto shape_r = Shape{1};
    auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});

    auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
    auto cf = external->make_call_frame();

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
    *a = vector<float>{2, 4, 8, 16};
    auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
    *b = vector<float>{1, 2, 4, 8};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);

    (*cf)({a,b}, {result});
    // Expected: 2*1 + 4*2 + 8*4 + 16*8 = 2 + 8 + 32 + 128 = 170.
    ASSERT_EQ((vector<float>{170}), result->get_vector());
}
// Dot of two rank-2 tensors dispatches to the matrix-matrix product path.
TEST(execute, test_dot2d)
{
    auto shape = Shape{2,2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto shape_r = Shape{2,2};
    auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});

    auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
    auto cf = external->make_call_frame();

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
    *a = vector<float>{1, 2,
                       3, 4};
    auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
    *b = vector<float>{5, 6,
                       7, 8};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);

    (*cf)({a,b}, {result});
    // Expected: [1 2; 3 4] * [5 6; 7 8] = [1*5+2*7 1*6+2*8; 3*5+4*7 3*6+4*8].
    ASSERT_EQ((vector<float>{19,22,
                             43,50}), result->get_vector());
}
TEST(execute, test_lessthan)
{
auto shape = Shape{2, 2, 2};
......@@ -208,6 +255,97 @@ TEST(execute, test_notequal)
ASSERT_EQ((vector<char>{0, 0, 1, 1, 1, 0, 0, 1}), result->get_vector());
}
// Dot with a rank-0 (scalar) first argument dispatches to the
// scalar-tensor product path: every element of B is scaled by A.
TEST(execute, test_scalar_tensor_arg0)
{
    auto shape_a = Shape{};
    auto shape_b = Shape{2,2,2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});

    auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
    auto cf = external->make_call_frame();

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{6};
    auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
    *b = vector<float>{1, 2, 3, 4, 5, 6, 7, 8};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_b);

    (*cf)({a,b}, {result});
    // Expected: 6 * each element of b.
    ASSERT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), result->get_vector());
}
// Dot with a rank-0 (scalar) second argument: the emitter swaps the
// operand order so the same scalar-tensor product instruction is used.
TEST(execute, test_scalar_tensor_arg1)
{
    auto shape_a = Shape{2,2,2};
    auto shape_b = Shape{};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});

    auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
    auto cf = external->make_call_frame();

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{1, 2, 3, 4, 5, 6, 7, 8};
    auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
    *b = vector<float>{6};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_a);

    (*cf)({a,b}, {result});
    // Expected: each element of a * 6.
    ASSERT_EQ((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}), result->get_vector());
}
// Dot of two rank-0 tensors: arg0's scalar check fires first, so this
// runs as a scalar-tensor product over a one-element tensor.
TEST(execute, test_scalar_scalar)
{
    auto shape = Shape{};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});

    auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
    auto cf = external->make_call_frame();

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
    *a = vector<float>{8};
    auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
    *b = vector<float>{6};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape);

    (*cf)({a,b}, {result});
    // Expected: 8 * 6 = 48.
    ASSERT_EQ((vector<float>{48}), result->get_vector());
}
// Dot of a rank-2 by a rank-1 tensor dispatches to the matrix-vector
// product path.
TEST(execute, test_matrix_vector)
{
    auto shape_a = Shape{4,4};
    auto shape_b = Shape{4};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape_a);
    auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A,B), op::Parameters{A,B});
    auto shape_r = Shape{4};

    auto external = make_shared<ngraph::runtime::ExternalFunction>(f);
    auto cf = external->make_call_frame();

    // Create some tensors for input/output
    auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
    *a = vector<float>{ 1, 2, 3, 4,
                        5, 6, 7, 8,
                        9,10,11,12,
                       13,14,15,16};
    auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
    *b = vector<float>{17,18,19,20};
    auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);

    (*cf)({a,b}, {result});
    // Expected: row i of a dotted with b, e.g. row 0: 17+36+57+80 = 190.
    ASSERT_EQ((vector<float>{190,486,782,1078}), result->get_vector());
}
TEST(execute, test_select)
{
auto shape = Shape{2, 2, 2};
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment