Commit c95c8b68 authored by Scott Cyphers's avatar Scott Cyphers Committed by Adam Procter

Use compile-time tensor info for ops (#152)

* Have abs use layout.

* Use layout to get vector info

* Add format selector for mapped arrays
Convert some ops
Drop runtime.cpp since we don't expose the functions any more.

* Add vector concat

* concat matrix

* Switch more ops to simpler eigen wrapping
parent eec7201d
......@@ -36,9 +36,11 @@ namespace ngraph
DenseTensorViewLayout(const TensorView& tensor_view);
virtual size_t get_size() override { return m_size; }
size_t get_offset() const { return m_offset; }
size_t get_offset() const { return m_offset; }
virtual size_t get_index_offset(const std::vector<size_t>& indices) override;
const Strides& get_strides() const { return m_strides; }
protected:
Strides m_strides;
size_t m_offset;
......
......@@ -17,6 +17,7 @@
#include <tuple>
#include <vector>
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/descriptor/buffer_pos.hpp"
namespace ngraph
......@@ -51,6 +52,8 @@ namespace ngraph
/// With non-linear buffers, this will need to be something other than size_t.
virtual size_t get_index_offset(const std::vector<size_t>& indices) = 0;
const Shape& get_shape() const { return m_tensor_view.get_tensor_view_type()->get_shape(); }
/// Where this view is located in the buffer.
const BufferPos& get_buffer_pos() const { return m_buffer_pos; }
BufferPos& get_buffer_pos() { return m_buffer_pos; }
......
......@@ -18,7 +18,6 @@
#include <vector>
#include "ngraph/function.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......@@ -26,6 +25,7 @@ namespace ngraph
namespace runtime
{
class PrimaryTensorView;
class Instruction;
// A VM for executing lightly-compiled graph functions.
class CallFrame
......@@ -57,6 +57,14 @@ namespace ngraph
return m_tensor_views[i]->get_parameterized_tensor_view<ET>();
}
/// Return a raw pointer to the first element of tensor view `i`'s backing
/// vector, typed for element type ET. Convenience for Eigen map construction.
/// NOTE(review): assumes the vector is non-empty — `&v[0]` on an empty
/// vector is undefined behavior; confirm callers never pass zero-sized views.
template<typename ET>
typename ET::type* get_tensor_view_data(size_t i)
{
return &get_parameterized_tensor_view<ET>(i)->get_vector()[0];
}
protected:
size_t m_n_inputs;
size_t m_n_outputs;
......
......@@ -18,6 +18,7 @@
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
......@@ -25,17 +26,11 @@ namespace ngraph
{
namespace eigen
{
template <typename T>
void abs(T arg, T out)
{
set_map_array(&*out, Eigen::abs(get_map_array(&*arg)));
}
template <typename ET>
class AbsInstruction : public Instruction
{
public:
AbsInstruction(size_t arg, size_t out)
AbsInstruction(const TensorViewInfo& arg, const TensorViewInfo& out)
: m_arg(arg)
, m_out(out)
{
......@@ -43,14 +38,12 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::abs(
call_frame.get_parameterized_tensor_view<ET>(m_arg),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET>(call_frame, m_out) = Eigen::abs(EigenArray1d<ET>(call_frame, m_arg));
}
protected:
size_t m_arg;
size_t m_out;
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,17 +25,13 @@ namespace ngraph
{
namespace eigen
{
template <typename T>
void add(T arg0, T arg1, T out)
{
set_map_array(&*out, get_map_array(&*arg0) + get_map_array(&*arg1));
}
template <typename ET>
class AddInstruction : public Instruction
{
public:
AddInstruction(size_t arg0, size_t arg1, size_t out)
AddInstruction(const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
......@@ -44,16 +40,14 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::add(
call_frame.get_parameterized_tensor_view<ET>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg0) + EigenArray1d<ET>(call_frame, m_arg1);
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
......
......@@ -29,7 +29,7 @@ namespace ngraph
class CallInstruction : public Instruction
{
public:
CallInstruction(std::shared_ptr<ExternalFunction> ef,std::vector<size_t> in, std::vector<size_t> out)
CallInstruction(std::shared_ptr<ExternalFunction> ef,std::vector<TensorViewInfo> in, std::vector<TensorViewInfo> out)
: m_external_function(ef)
, m_in(in)
, m_out(out)
......@@ -45,19 +45,19 @@ namespace ngraph
for (auto in : m_in)
{
inputs.push_back(call_frame.get_tensor_view(in));
inputs.push_back(call_frame.get_tensor_view(in.get_index()));
}
for (auto out : m_out)
{
outputs.push_back(call_frame.get_tensor_view(out));
outputs.push_back(call_frame.get_tensor_view(out.get_index()));
}
(*cf)(inputs,outputs);
}
protected:
std::shared_ptr<ExternalFunction> m_external_function;
std::vector<size_t> m_in;
std::vector<size_t> m_out;
std::vector<TensorViewInfo> m_in;
std::vector<TensorViewInfo> m_out;
};
}
}
......
......@@ -18,6 +18,7 @@
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
......@@ -25,69 +26,43 @@ namespace ngraph
{
namespace eigen
{
// Intended substitutions for T are shared_ptr<ParameterizedTensorView<...>>
// and ParameterizedTensorView<...>*.
template <typename T>
void concat_matrix(std::vector<T>& args, T out, size_t axis)
{
auto mat_out = get_map_matrix_2d(&*out);
auto& out_shape = out->get_shape();
assert (out_shape.size() == 2);
assert (axis == 0 || axis == 1);
size_t concat_pos = 0;
for(T arg : args)
{
auto mat_arg = get_map_matrix_2d(&*arg);
auto& arg_shape = arg->get_shape();
assert (arg_shape.size() == 2);
if (axis == 0)
{
mat_out.block(concat_pos,0,arg_shape.at(0),arg_shape.at(1))
<< mat_arg;
concat_pos += arg_shape.at(0);
}
else
{
mat_out.block(0,concat_pos,arg_shape.at(0),arg_shape.at(1))
<< mat_arg;
concat_pos += arg_shape.at(1);
}
}
}
template <typename ET>
class ConcatMatrixInstruction : public Instruction
{
public:
ConcatMatrixInstruction(const std::vector<size_t>& args, size_t axis, size_t out)
ConcatMatrixInstruction(const std::vector<TensorViewInfo>& args,
size_t axis,
const TensorViewInfo& out)
: m_args(args)
, m_axis(axis)
, m_out(out)
{
size_t concat_pos[2]{0, 0};
for (auto arg : args)
{
auto& arg_shape = arg.get_tensor_view_layout()->get_shape();
m_blocks.push_back(
{concat_pos[0], concat_pos[1], arg_shape.at(0), arg_shape.at(1)});
concat_pos[axis] += arg_shape.at(axis);
}
}
virtual void execute(CallFrame& call_frame) const override
{
std::vector<ParameterizedTensorView<ET>*> ptvs;
for(size_t arg : m_args)
EigenMatrix<ET> out(call_frame, m_out);
for (size_t i = 0; i < m_args.size(); i++)
{
ptvs.push_back(call_frame.get_parameterized_tensor_view<ET>(arg));
auto& b = m_blocks[i];
out.block(b[0], b[1], b[2], b[3])
<< EigenMatrix<ET>(call_frame, m_args.at(i));
}
runtime::eigen::concat_matrix(
ptvs,
call_frame.get_parameterized_tensor_view<ET>(m_out),
m_axis);
}
protected:
std::vector<size_t> m_args;
size_t m_axis;
size_t m_out;
std::vector<TensorViewInfo> m_args;
size_t m_axis;
TensorViewInfo m_out;
std::vector<std::vector<size_t>> m_blocks;
};
}
}
......
......@@ -25,55 +25,37 @@ namespace ngraph
{
namespace eigen
{
// Intended substitutions for T are shared_ptr<ParameterizedTensorView<...>>
// and ParameterizedTensorView<...>*.
template <typename T>
void concat_vector(std::vector<T>& args, T out)
{
auto vec_out = get_map_matrix(&*out);
auto& out_shape = out->get_shape();
assert (out_shape.size() == 1);
size_t concat_pos = 0;
for(T arg : args)
{
auto vec_arg = get_map_matrix(&*arg);
auto& arg_shape = arg->get_shape();
assert (arg_shape.size() == 1);
vec_out.segment(concat_pos,arg_shape.at(0)) << vec_arg;
concat_pos += arg_shape.at(0);
}
}
// Would be better to just generate a sequence of copy into slice of output instructions
template <typename ET>
class ConcatVectorInstruction : public Instruction
{
public:
ConcatVectorInstruction(const std::vector<size_t>& args, size_t out)
ConcatVectorInstruction(const std::vector<TensorViewInfo>& args,
const TensorViewInfo& out)
: m_args(args)
, m_out(out)
{
for (auto arg : args)
{
auto& arg_shape = arg.get_tensor_view_layout()->get_shape();
m_sizes.push_back(arg_shape.at(0));
}
}
virtual void execute(CallFrame& call_frame) const override
{
std::vector<ParameterizedTensorView<ET>*> ptvs;
for(size_t arg : m_args)
{
ptvs.push_back(call_frame.get_parameterized_tensor_view<ET>(arg));
EigenVector<ET> out(call_frame, m_out);
size_t concat_pos = 0;
for (size_t i = 0; i < m_args.size(); i++){
out.segment(concat_pos, m_sizes[i]) << EigenVector<ET>(call_frame, m_args.at(i));
concat_pos += m_sizes[i];
}
runtime::eigen::concat_vector(
ptvs,
call_frame.get_parameterized_tensor_view<ET>(m_out));
}
protected:
std::vector<size_t> m_args;
size_t m_out;
std::vector<TensorViewInfo> m_args;
TensorViewInfo m_out;
std::vector<size_t> m_sizes;
};
}
}
......
......@@ -18,6 +18,7 @@
#include "ngraph/runtime/eigen/utils.hpp"
#include "ngraph/runtime/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
......@@ -25,17 +26,11 @@ namespace ngraph
{
namespace eigen
{
template <typename ET,typename T>
void assign_constant(const std::vector<ET>& value, T out)
{
out->get_vector() = value;
}
template <typename ET>
class ConstantInstruction : public Instruction
{
public:
ConstantInstruction(const std::vector<typename ET::type> value, size_t out)
ConstantInstruction(const std::vector<typename ET::type> value, const TensorViewInfo& out)
: m_value(value)
, m_out(out)
{
......@@ -43,14 +38,12 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::assign_constant(
m_value,
call_frame.get_parameterized_tensor_view<ET>(m_out));
call_frame.get_parameterized_tensor_view<ET>(m_out.get_index())->get_vector() = m_value;
}
protected:
const std::vector<typename ET::type> m_value;
size_t m_out;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,17 +25,13 @@ namespace ngraph
{
namespace eigen
{
template <typename T>
void divide(T arg0, T arg1, T out)
{
set_map_array(&*out, get_map_array(&*arg0) / get_map_array(&*arg1));
}
template <typename ET>
class DivideInstruction : public Instruction
{
public:
DivideInstruction(size_t arg0, size_t arg1, size_t out)
DivideInstruction(const TensorViewInfo& arg0,
const TensorViewInfo& arg1,
const TensorViewInfo& out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
......@@ -44,16 +40,14 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::divide(
call_frame.get_parameterized_tensor_view<ET>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg0) / EigenArray1d<ET>(call_frame, m_arg1);
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,19 +25,11 @@ namespace ngraph
{
namespace eigen
{
template <typename TI,typename TO>
void equal(TI arg0, TI arg1, TO out)
{
auto result_as_float = get_map_array(&*arg0) == get_map_array(&*arg1);
auto result_as_char = result_as_float.template cast<char>();
set_map_array(&*out, result_as_char);
}
template <typename ET>
class EqualInstruction : public Instruction
{
public:
EqualInstruction(size_t arg0, size_t arg1, size_t out)
EqualInstruction(TensorViewInfo arg0, TensorViewInfo arg1, TensorViewInfo out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
......@@ -46,16 +38,16 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::equal(
call_frame.get_parameterized_tensor_view<ET>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<element::Bool>(m_out));
EigenArray1d<element::Bool>(call_frame, m_out) =
(EigenArray1d<ET>(call_frame, m_arg0) ==
EigenArray1d<ET>(call_frame, m_arg1))
.template cast<char>();
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,7 +25,7 @@ namespace ngraph
{
namespace eigen
{
template <typename TI,typename TO>
template <typename TI, typename TO>
void less_than(TI arg0, TI arg1, TO out)
{
auto result_as_float = get_map_array(&*arg0) < get_map_array(&*arg1);
......@@ -37,7 +37,7 @@ namespace ngraph
class LessThanInstruction : public Instruction
{
public:
LessThanInstruction(size_t arg0, size_t arg1, size_t out)
LessThanInstruction(TensorViewInfo arg0, TensorViewInfo arg1, TensorViewInfo out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
......@@ -46,16 +46,16 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::less_than(
call_frame.get_parameterized_tensor_view<ET>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<element::Bool>(m_out));
EigenArray1d<element::Bool>(call_frame, m_out) =
(EigenArray1d<ET>(call_frame, m_arg0) <
EigenArray1d<ET>(call_frame, m_arg1))
.template cast<char>();
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,17 +25,11 @@ namespace ngraph
{
namespace eigen
{
template <typename T>
void log(T arg, T out)
{
set_map_array(&*out, Eigen::log(get_map_array(&*arg)));
}
template <typename ET>
class LogInstruction : public Instruction
{
public:
LogInstruction(size_t arg, size_t out)
LogInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
......@@ -43,14 +37,12 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::log(
call_frame.get_parameterized_tensor_view<ET>(m_arg),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET, fmt::V>(call_frame, m_out) = Eigen::log(EigenArray1d<ET, fmt::V>(call_frame, m_arg));
}
protected:
size_t m_arg;
size_t m_out;
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,17 +25,11 @@ namespace ngraph
{
namespace eigen
{
template <typename T>
void maximum(T arg0, T arg1, T out)
{
set_map_array(out, get_map_array(&*arg0).max(get_map_array(&*arg1)));
}
template <typename ET>
class MaximumInstruction : public Instruction
{
public:
MaximumInstruction(size_t arg0, size_t arg1, size_t out)
MaximumInstruction(TensorViewInfo arg0, TensorViewInfo arg1, TensorViewInfo out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
......@@ -44,16 +38,15 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::maximum(
call_frame.get_parameterized_tensor_view<ET>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg0)
.max(EigenArray1d<ET>(call_frame, m_arg1));
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
......
......@@ -24,17 +24,11 @@ namespace ngraph
{
namespace eigen
{
template <typename T>
void multiply(T arg0, T arg1, T out)
{
set_map_array(&*out, get_map_array(&*arg0) * get_map_array(&*arg1));
}
template <typename ET>
class MultiplyInstruction : public Instruction
{
public:
MultiplyInstruction(size_t arg0, size_t arg1, size_t out)
MultiplyInstruction(TensorViewInfo arg0, TensorViewInfo arg1, TensorViewInfo out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
......@@ -43,16 +37,14 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::multiply(
call_frame.get_parameterized_tensor_view<ET>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg0) * EigenArray1d<ET>(call_frame, m_arg1);
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,17 +25,11 @@ namespace ngraph
{
namespace eigen
{
template <typename T>
void negate(T arg, T out)
{
set_map_array(&*out, -(get_map_array(&*arg)));
}
template <typename ET>
class NegateInstruction : public Instruction
{
public:
NegateInstruction(size_t arg, size_t out)
NegateInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
......@@ -43,14 +37,12 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::negate(
call_frame.get_parameterized_tensor_view<ET>(m_arg),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET>(call_frame, m_out) = -EigenArray1d<ET>(call_frame, m_arg);
}
protected:
size_t m_arg;
size_t m_out;
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,19 +25,11 @@ namespace ngraph
{
namespace eigen
{
template <typename TI,typename TO>
void not_equal(TI arg0, TI arg1, TO out)
{
auto result_as_float = get_map_array(&*arg0) != get_map_array(&*arg1);
auto result_as_char = result_as_float.template cast<char>();
set_map_array(&*out, result_as_char);
}
template <typename ET>
class NotEqualInstruction : public Instruction
{
public:
NotEqualInstruction(size_t arg0, size_t arg1, size_t out)
NotEqualInstruction(TensorViewInfo arg0, TensorViewInfo arg1, TensorViewInfo out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
......@@ -46,16 +38,16 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::not_equal(
call_frame.get_parameterized_tensor_view<ET>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<element::Bool>(m_out));
EigenArray1d<element::Bool>(call_frame, m_out) =
(EigenArray1d<ET>(call_frame, m_arg0) !=
EigenArray1d<ET>(call_frame, m_arg1))
.template cast<char>();
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,17 +25,14 @@ namespace ngraph
{
namespace eigen
{
template <typename TA,typename TB>
void select(TA arg0, TB arg1, TB arg2, TB out)
{
set_map_array(&*out, get_map_array(&*arg0).select(get_map_array(&*arg1),get_map_array(&*arg2)));
}
template <typename ET>
class SelectInstruction : public Instruction
{
public:
SelectInstruction(size_t arg0, size_t arg1, size_t arg2, size_t out)
SelectInstruction(TensorViewInfo arg0,
TensorViewInfo arg1,
TensorViewInfo arg2,
TensorViewInfo out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_arg2(arg2)
......@@ -45,18 +42,17 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::select(
call_frame.get_parameterized_tensor_view<element::Bool>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<ET>(m_arg2),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<element::Bool>(call_frame, m_arg0)
.select(EigenArray1d<ET>(call_frame, m_arg1),
EigenArray1d<ET>(call_frame, m_arg2));
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_arg2;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_arg2;
TensorViewInfo m_out;
};
}
}
......
......@@ -25,17 +25,11 @@ namespace ngraph
{
namespace eigen
{
template <typename T>
void subtract(T arg0, T arg1, T out)
{
set_map_array(&*out, get_map_array(&*arg0) - get_map_array(&*arg1));
}
template <typename ET>
class SubtractInstruction : public Instruction
{
public:
SubtractInstruction(size_t arg0, size_t arg1, size_t out)
SubtractInstruction(TensorViewInfo arg0, TensorViewInfo arg1, TensorViewInfo out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
......@@ -44,16 +38,14 @@ namespace ngraph
virtual void execute(CallFrame& call_frame) const override
{
runtime::eigen::subtract(
call_frame.get_parameterized_tensor_view<ET>(m_arg0),
call_frame.get_parameterized_tensor_view<ET>(m_arg1),
call_frame.get_parameterized_tensor_view<ET>(m_out));
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg0) - EigenArray1d<ET>(call_frame, m_arg1);
}
protected:
size_t m_arg0;
size_t m_arg1;
size_t m_out;
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
......
This diff is collapsed.
......@@ -19,6 +19,7 @@
#include <unordered_map>
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
......@@ -73,6 +74,8 @@
using namespace std;
using namespace ngraph::runtime;
using ngraph::descriptor::layout::DenseTensorViewLayout;
ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& function,
bool release_function)
: m_function(function)
......@@ -86,25 +89,26 @@ ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& func
op_map[type_index(typeid(op_class))] = [](const Node* n, \
ExternalFunction* ef, \
FunctionMap& function_map, \
const std::vector<size_t>& in, \
const std::vector<size_t>& out)
const std::vector<TensorViewInfo>& in, \
const std::vector<TensorViewInfo>& out)
#define REGISTER_INSTRUCTION(op_class, instr_class, ...) \
REGISTER_TO_OP_MAP(op_class) { \
ef->get_instructions()->push_back(make_shared<instr_class>(__VA_ARGS__)); \
}
#define REGISTER_UNOP(op_class, instr_class) \
// Versions that include the descriptor
#define REGISTER_UNOP(op_class, instr_class) \
REGISTER_INSTRUCTION(op_class, instr_class, in[0], out[0])
#define REGISTER_BINOP(op_class, instr_class) \
#define REGISTER_BINOP(op_class, instr_class) \
REGISTER_INSTRUCTION(op_class, instr_class, in[0], in[1], out[0])
#define REGISTER_TERNOP(op_class, instr_class) \
#define REGISTER_TERNOP(op_class, instr_class) \
REGISTER_INSTRUCTION(op_class, instr_class, in[0], in[1], in[2], out[0])
// Define code generators for handled ops.
ExternalFunction::OpMap& ExternalFunction::get_op_map()
{
static bool initialized = false;
static bool initialized = false;
static OpMap op_map;
if (!initialized)
{
......@@ -145,8 +149,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
if (result_shape.size() == 1)
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::ConcatVectorInstruction<element::Float32>>(in,
out[0]));
make_shared<runtime::eigen::ConcatVectorInstruction<element::Float32>>(
in, out[0]));
}
else if (result_shape.size() == 2)
{
......@@ -184,14 +188,14 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::ScalarTensorProductInstruction<element::Float32>>(
in[0], in[1], out[0]));
in[0].get_index(), in[1].get_index(), out[0].get_index()));
}
else if (arg1_shape.size() == 0)
{
// If arg1 is the scalar, do the same thing but switch the order of operands.
ef->get_instructions()->push_back(
make_shared<runtime::eigen::ScalarTensorProductInstruction<element::Float32>>(
in[1], in[0], out[0]));
in[1].get_index(), in[0].get_index(), out[0].get_index()));
}
// If arg0 and arg1 are both vectors, emit a dot product.
......@@ -199,7 +203,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::DotInstruction<element::Float32>>(
in[0], in[1], out[0]));
in[0].get_index(), in[1].get_index(), out[0].get_index()));
}
// If arg0 is a matrix and arg1 is a vector, emit a matrix-vector product.
......@@ -207,7 +211,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::MatrixVectorProductInstruction<element::Float32>>(
in[0], in[1], out[0]));
in[0].get_index(), in[1].get_index(), out[0].get_index()));
}
// If arg0 and arg1 are both matrices, emit a matrix product.
......@@ -215,7 +219,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::MatrixMultInstruction<element::Float32>>(
in[0], in[1], out[0]));
in[0].get_index(), in[1].get_index(), out[0].get_index()));
}
else
......@@ -234,7 +238,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
ef->get_instructions()->push_back(
make_shared<runtime::eigen::CopyInstruction<element::Float32>>(
in.at(get_tuple_element->get_n()), out.at(0)));
in.at(get_tuple_element->get_n()).get_index(), out.at(0).get_index()));
};
// Tuple will be spliced out, with the users of out connected to the corresponding in's source, but, for now, we need to copy.
......@@ -243,8 +247,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
for (size_t i = 0; i < in.size(); ++i)
{
ef->get_instructions()->push_back(
make_shared<runtime::eigen::CopyInstruction<element::Float32>>(in.at(i),
out.at(i)));
make_shared<runtime::eigen::CopyInstruction<element::Float32>>(
in.at(i).get_index(), out.at(i).get_index()));
}
};
......@@ -295,6 +299,22 @@ void ExternalFunction::compile(FunctionMap& function_map)
pass_manager.register_pass<pass::AssignTensors>();
pass_manager.run_passes(m_function);
// Turn this into a pass
// Assign layouts
// For now, just make everyone row-major.
for (const Node* node : pass_manager.get_call_graph())
{
for (const descriptor::Output& output : node->get_outputs())
{
auto tv = output.get_tensor_view();
if (nullptr == tv->get_tensor_view_layout())
{
auto layout = std::make_shared<DenseTensorViewLayout>(*tv);
tv->set_tensor_view_layout(layout);
}
}
}
// Determine tensor requirements for the call frame
unordered_map<shared_ptr<ngraph::descriptor::TensorView>, size_t> tensor_index;
// First come the function inputs
......@@ -342,18 +362,18 @@ void ExternalFunction::compile(FunctionMap& function_map)
{
throw ngraph_error("Unhandled op during code generation");
}
std::vector<size_t> in;
std::vector<TensorViewInfo> in;
for (const descriptor::Input& input : node->get_inputs())
{
const descriptor::Output& output = input.get_output();
auto tv = output.get_tensor_view();
in.push_back(tensor_index.at(tv));
in.push_back({tensor_index.at(tv), tv});
}
std::vector<size_t> out;
std::vector<TensorViewInfo> out;
for (const descriptor::Output& output : node->get_outputs())
{
auto tv = output.get_tensor_view();
out.push_back(tensor_index.at(tv));
out.push_back({tensor_index.at(tv), tv});
}
handler_it->second(node, this, function_map, in, out);
}
......
......@@ -20,6 +20,7 @@
#include <unordered_map>
#include "ngraph/function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
namespace ngraph
{
......@@ -32,8 +33,8 @@ namespace ngraph
using OpFunction = std::function<void(const ngraph::Node*,
ExternalFunction*,
FunctionMap&,
const std::vector<size_t>& inputs,
const std::vector<size_t>& outputs)>;
const std::vector<TensorViewInfo>& inputs,
const std::vector<TensorViewInfo>& outputs)>;
using OpMap = std::unordered_map<std::type_index, OpFunction>;
public:
......
......@@ -14,17 +14,25 @@
#pragma once
#include <memory>
#include "ngraph/runtime/call_frame.hpp"
namespace ngraph
{
namespace runtime
{
class CallFrame;
// An interpreter for an Op
/// @brief An interpreter for an Op
///
/// The call_frame has a vector of instructions and calls execute on each instruction, passing it the call_frame.
/// Instructions get argument, result, and intermediate tensor views from the call frame. Instructions may also
/// set a flag in the call_frame to end execution, or adjust execution by modifying the position in the instruction vector.
class Instruction
{
public:
virtual ~Instruction(){}
virtual ~Instruction() {}
virtual void execute(CallFrame& call_frame) const = 0;
};
}
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
namespace ngraph
{
namespace runtime
{
/// @brief Compile-time information about a tensor view.
///
/// Contains the offset of the tensor view in the call frame and the tensor descriptor.
class TensorViewInfo
{
public:
// Capture the tensor view's slot index in the call frame together with
// the layout taken from its descriptor at construction time.
TensorViewInfo(size_t index,
const std::shared_ptr<ngraph::descriptor::TensorView>& descriptor)
: m_index(index)
, m_layout(descriptor->get_tensor_view_layout())
{
}
// Position of this tensor view in the call frame's tensor vector.
size_t get_index() const { return m_index; }
// The layout cached from the descriptor when this info was built.
std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout>
get_tensor_view_layout() const
{
return m_layout;
}
// Downcast the cached layout to the concrete layout type LT.
// Uses static_pointer_cast — no runtime check — so the caller must
// already know the actual layout kind (e.g. DenseTensorViewLayout).
template <typename LT>
std::shared_ptr<LT> get_layout() const
{
return std::static_pointer_cast<LT>(m_layout);
}
protected:
size_t m_index;
std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout> m_layout;
};
}
}
......@@ -17,7 +17,6 @@
#include <memory>
#include <vector>
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/parameterized_tensor_view.hpp"
#include "ngraph/runtime/tuple.hpp"
#include "ngraph/runtime/value.hpp"
......
......@@ -32,7 +32,6 @@ set (SRC
pass_liveness.cpp
pass_manager.cpp
pass_memory_layout.cpp
runtime.cpp
shape.cpp
tensor.cpp
test_tools.cpp
......
......@@ -165,19 +165,16 @@ TEST(execute, test_concat_matrix_colwise)
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{2, 4,
8, 16};
*a = vector<float>{2, 4, 8, 16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{ 1, 2, 4,
8,16,32};
*b = vector<float>{1, 2, 4, 8, 16, 32};
auto c = ngraph::runtime::make_tensor<element::Float32>(shape_c);
*c = vector<float>{ 2, 3, 5,
7,11,13};
*c = vector<float>{2, 3, 5, 7, 11, 13};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b,c}, {result});
ASSERT_EQ((vector<float>{ 2, 4, 1, 2, 4, 2, 3, 5,
8, 16, 8, 16,32, 7, 11, 13}), result->get_vector());
(*cf)({a, b, c}, {result});
ASSERT_EQ((vector<float>{2, 4, 1, 2, 4, 2, 3, 5, 8, 16, 8, 16, 32, 7, 11, 13}),
result->get_vector());
}
TEST(execute, test_concat_matrix_rowwise)
......@@ -197,27 +194,16 @@ TEST(execute, test_concat_matrix_rowwise)
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{2, 4,
8, 16};
*a = vector<float>{2, 4, 8, 16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{ 1, 2,
4, 8,
16,32};
*b = vector<float>{1, 2, 4, 8, 16, 32};
auto c = ngraph::runtime::make_tensor<element::Float32>(shape_c);
*c = vector<float>{ 2, 3,
5, 7,
11,13};
*c = vector<float>{2, 3, 5, 7, 11, 13};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b,c}, {result});
ASSERT_EQ((vector<float>{ 2, 4,
8, 16,
1, 2,
4, 8,
16, 32,
2, 3,
5, 7,
11, 13}), result->get_vector());
(*cf)({a, b, c}, {result});
ASSERT_EQ((vector<float>{2, 4, 8, 16, 1, 2, 4, 8, 16, 32, 2, 3, 5, 7, 11, 13}),
result->get_vector());
}
TEST(execute, test_concat_vector)
......@@ -237,15 +223,15 @@ TEST(execute, test_concat_vector)
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape_a);
*a = vector<float>{2,4,8,16};
*a = vector<float>{2, 4, 8, 16};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape_b);
*b = vector<float>{1,2,4,8,16,32};
*b = vector<float>{1, 2, 4, 8, 16, 32};
auto c = ngraph::runtime::make_tensor<element::Float32>(shape_c);
*c = vector<float>{18,19};
*c = vector<float>{18, 19};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b,c}, {result});
ASSERT_EQ((vector<float>{2,4,8,16,1,2,4,8,16,32,18,19}), result->get_vector());
(*cf)({a, b, c}, {result});
ASSERT_EQ((vector<float>{2, 4, 8, 16, 1, 2, 4, 8, 16, 32, 18, 19}), result->get_vector());
}
TEST(execute, test_divide)
......@@ -311,13 +297,13 @@ TEST(execute, test_dot1d)
*b = vector<float>{1, 2, 4, 8};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b}, {result});
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{170}), result->get_vector());
}
TEST(execute, test_dot2d)
{
auto shape = Shape{2,2};
auto shape = Shape{2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto shape_r = Shape{2,2};
......@@ -329,16 +315,13 @@ TEST(execute, test_dot2d)
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{1, 2,
3, 4};
*a = vector<float>{1, 2, 3, 4};
auto b = ngraph::runtime::make_tensor<element::Float32>(shape);
*b = vector<float>{5, 6,
7, 8};
*b = vector<float>{5, 6, 7, 8};
auto result = ngraph::runtime::make_tensor<element::Float32>(shape_r);
(*cf)({a,b}, {result});
ASSERT_EQ((vector<float>{19,22,
43,50}), result->get_vector());
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{19, 22, 43, 50}), result->get_vector());
}
TEST(execute, test_dot_scalar_tensor_arg0)
......@@ -471,10 +454,14 @@ TEST(execute, test_log)
// Create some tensors for input/output
auto a = ngraph::runtime::make_tensor<element::Float32>(shape);
*a = vector<float>{expf(1), expf(2), expf(3), expf(4), expf(5), expf(6), expf(7), expf(8)};
vector<float> loga;
for (auto elt : a->get_vector()){
loga.push_back(logf(elt));
}
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({a}, {result});
ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), result->get_vector());
ASSERT_EQ(loga, result->get_vector());
}
TEST(execute, test_maximum)
......@@ -619,7 +606,7 @@ TEST(execute, test_tensor_constant)
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({}, {result});
ASSERT_EQ((vector<float>{1,2,3,4,5,6,7,8}), result->get_vector());
ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), result->get_vector());
}
TEST(execute, test_tensor_constant_with_op)
......@@ -637,7 +624,7 @@ TEST(execute, test_tensor_constant_with_op)
auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
(*cf)({}, {result});
ASSERT_EQ((vector<float>{1,2,3,4,5,6,7,8}), result->get_vector());
ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), result->get_vector());
}
TEST(execute, test_function_call)
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include <vector>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/eigen/add.hpp"
#include "ngraph/runtime/eigen/multiply.hpp"
#include "ngraph/runtime/eigen/return.hpp"
using namespace std;
using namespace ngraph;
using namespace ngraph::runtime;
namespace ngeigen = ngraph::runtime::eigen;
TEST(runtime, test_add)
{
    // Elementwise addition of two 2x2 Float32 tensors through the eigen kernel.
    auto lhs = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    lhs->get_vector() = {1, 2, 3, 4};

    auto rhs = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    rhs->get_vector() = {5, 6, 7, 8};

    auto sum = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    ngeigen::add(lhs, rhs, sum);

    ASSERT_EQ((vector<float>{6, 8, 10, 12}), sum->get_vector());
}
TEST(runtime, test_multiply)
{
    // Elementwise multiplication of two 2x2 Float32 tensors through the eigen kernel.
    auto lhs = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    lhs->get_vector() = {1, 2, 3, 4};

    auto rhs = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    rhs->get_vector() = {5, 6, 7, 8};

    auto product = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    ngeigen::multiply(lhs, rhs, product);

    ASSERT_EQ((vector<float>{5, 12, 21, 32}), product->get_vector());
}
TEST(runtime, test_add_multiply)
{
    // Hand-assembled instruction sequence computing (arg0 + arg1) * arg2.
    //
    // Call-frame slot layout:
    //   0, 1, 2 : inputs a, b, c
    //   3       : result (output)
    //   4       : temporary t0
    auto program = make_shared<std::vector<std::shared_ptr<ngraph::runtime::Instruction>>>();
    // t0 = slot0 + slot1
    program->push_back(make_shared<ngeigen::AddInstruction<element::Float32>>(0, 1, 4));
    // result = t0 * slot2
    program->push_back(make_shared<ngeigen::MultiplyInstruction<element::Float32>>(4, 2, 3));
    program->push_back(make_shared<ngeigen::ReturnInstruction>());

    // 3 inputs, 1 output, one preallocated temporary; presumably the 0 is the
    // initial instruction index — TODO confirm against CallFrame's ctor.
    runtime::CallFrame cf{
        3, 1, {ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2})}, 0, program};

    // Input/output tensors.
    auto a = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    a->get_vector() = {1, 2, 3, 4};
    auto b = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    b->get_vector() = {5, 6, 7, 8};
    auto c = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});
    c->get_vector() = {9, 10, 11, 12};
    auto result = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 2});

    // (a + b) * c — and addition commutes, so swapping a/b gives the same result.
    cf({a, b, c}, {result});
    ASSERT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector());

    cf({b, a, c}, {result});
    ASSERT_EQ((vector<float>{54, 80, 110, 144}), result->get_vector());

    // (a + c) * b — different binding of arguments to slots.
    cf({a, c, b}, {result});
    ASSERT_EQ((vector<float>{50, 72, 98, 128}), result->get_vector());
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment