Commit 4fa2d1a1 authored by Jaikrishnan Menon

CPU: Implement op::Tuple and op::GetTupleElement

parent 8895e895
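For context: op::Tuple packs several graph values into a single tuple-typed result, and op::GetTupleElement selects one element back out of a tuple-valued node. A minimal sketch of how the two fit together, modeled on the abc_tuple test further down (shapes and names are illustrative, and the GetTupleElement constructor taking a source node plus element index is an assumption consistent with the get_n() accessor used in the emitter below):

    auto shape = Shape{2, 2};
    auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    // Pack a single value into a tuple-typed result.
    auto t = make_shared<op::Tuple>(Nodes{A + B});
    // Select element 0 of the tuple; get_n() on the op reports this index.
    auto e = make_shared<op::GetTupleElement>(t, 0);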
@@ -20,6 +20,7 @@
 #include "ngraph/node.hpp"
 #include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
+#include "ngraph/ops/get_tuple_element.hpp"
 #include "ngraph/runtime/tensor_view_info.hpp"
 #include "ngraph/runtime/cpu/external_function.hpp"
 #include "ngraph/runtime/cpu/emitter.hpp"
@@ -43,7 +44,7 @@ static unordered_map<type_index, string> element_type_names = {{TI(ngraph::eleme
 #define EIGEN_VECTOR_FORMAT(x) "{" + to_string(x) + "}"
-#define EIGEN_MATRIX_FORMAT(x)
+//#define EIGEN_MATRIX_FORMAT(x)

 void Emitter::EmitNop(const ngraph::Node* n,
                       ExternalFunction* ef,
@@ -107,3 +108,43 @@ void Emitter::EmitMultiply(const ngraph::Node* n,
             EIGEN_VECTOR_FORMAT(inputs[1].get_layout<DenseTensorViewLayout>()->get_size()) ");\n"
         "    }\n";
 }
+
+void Emitter::EmitGetTupleElement(const ngraph::Node* n,
+                                  ExternalFunction* ef,
+                                  FunctionMap& function_map,
+                                  const std::vector<TensorViewInfo>& inputs,
+                                  const std::vector<TensorViewInfo>& outputs)
+{
+    auto get_tuple_element = static_cast<const op::GetTupleElement*>(n);
+    auto result_tensor_type =
+        dynamic_pointer_cast<const TensorViewType>(n->get_value_type());
+    assert(result_tensor_type);
+    auto& result_element_type = result_tensor_type->get_element_type();
+
+    TU += "    {\n"
+          "        call_frame->get_parameterized_tensor_view<" + element_type_names[TI(result_element_type)] + ">(" +
+          to_string(outputs.at(0).get_index()) + ")->get_vector() =\n"
+          "        call_frame->get_parameterized_tensor_view<" + element_type_names[TI(result_element_type)] + ">(" +
+          to_string(inputs.at(get_tuple_element->get_n()).get_index()) + ")->get_vector();\n"
+          "    }\n";
+}
+
+void Emitter::EmitTuple(const ngraph::Node* n,
+                        ExternalFunction* ef,
+                        FunctionMap& function_map,
+                        const std::vector<TensorViewInfo>& inputs,
+                        const std::vector<TensorViewInfo>& outputs)
+{
+    assert(inputs.size() == outputs.size());
+
+    TU += "    {\n";
+    for (size_t i = 0; i < inputs.size(); ++i)
+    {
+        auto& et = inputs.at(i).get_tensor_view_layout()->get_element_type();
+        TU += "        call_frame->get_parameterized_tensor_view<" + element_type_names[TI(et)] + ">(" +
+              to_string(outputs.at(i).get_index()) + ")->get_vector() =\n"
+              "        call_frame->get_parameterized_tensor_view<" + element_type_names[TI(et)] + ">(" +
+              to_string(inputs.at(i).get_index()) + ")->get_vector();\n";
+    }
+    TU += "    }\n";
+}
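Both emitters generate plain vector copies into the translation-unit string TU: GetTupleElement copies the selected tuple element's tensor view into the single output, and Tuple copies each input view into the matching output view. Assuming element_type_names maps Float32 to something like "ngraph::element::Float32", the code appended for a two-element Float32 tuple would look roughly like this (a sketch only; the tensor-view indices are illustrative):

    {
        call_frame->get_parameterized_tensor_view<ngraph::element::Float32>(2)->get_vector() =
            call_frame->get_parameterized_tensor_view<ngraph::element::Float32>(0)->get_vector();
        call_frame->get_parameterized_tensor_view<ngraph::element::Float32>(3)->get_vector() =
            call_frame->get_parameterized_tensor_view<ngraph::element::Float32>(1)->get_vector();
    }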
@@ -60,6 +60,18 @@ namespace ngraph
                            const std::vector<TensorViewInfo>& inputs,
                            const std::vector<TensorViewInfo>& outputs);
+
+            void EmitGetTupleElement(const ngraph::Node*,
+                                     ExternalFunction*,
+                                     FunctionMap&,
+                                     const std::vector<TensorViewInfo>& inputs,
+                                     const std::vector<TensorViewInfo>& outputs);
+
+            void EmitTuple(const ngraph::Node*,
+                           ExternalFunction*,
+                           FunctionMap&,
+                           const std::vector<TensorViewInfo>& inputs,
+                           const std::vector<TensorViewInfo>& outputs);
        };
    }
}
......
@@ -69,7 +69,10 @@ using ngraph::descriptor::layout::DenseTensorViewLayout;
 static const OpMap dispatcher{{TI(ngraph::op::Add), &Emitter::EmitAdd},
                               {TI(ngraph::op::Dot), &Emitter::EmitDot},
                               {TI(ngraph::op::Multiply), &Emitter::EmitMultiply},
-                              {TI(ngraph::op::Parameter), &Emitter::EmitNop}};
+                              {TI(ngraph::op::Parameter), &Emitter::EmitNop},
+                              {TI(ngraph::op::GetTupleElement), &Emitter::EmitGetTupleElement},
+                              {TI(ngraph::op::Tuple), &Emitter::EmitTuple}
+};

 #undef TI
......
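The dispatcher keys emitter member functions by the node's concrete type (TI(x) expands to a type_index of typeid(x)). A rough sketch of the lookup the external function presumably performs for each node while generating code; none of this appears in the diff, and the variable names and error type are assumptions:

    // Look up the node's dynamic type in the op table.
    auto handler = dispatcher.find(type_index(typeid(*node)));
    if (handler == dispatcher.end())
    {
        throw ngraph_error("Unhandled op in CPU emitter dispatch");
    }
    // Invoke the matching Emitter member through the stored pointer-to-member.
    (emitter.*(handler->second))(node, external_function, function_map, inputs, outputs);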
@@ -81,7 +81,6 @@ TEST(cpu, abc)
     ASSERT_EQ((vector<float>{50, 72, 98, 128}), result->get_vector());
 }
-/*
 TEST(cpu, abc_int64)
 {
     auto shape = Shape{2, 2};
@@ -91,7 +90,7 @@ TEST(cpu, abc_int64)
     auto rt = make_shared<TensorViewType>(element::Int64::element_type(), shape);
     auto f = make_shared<Function>((A + B) * C, rt, op::Parameters{A, B, C});
-    auto manager = runtime::Manager::get("NGVM");
+    auto manager = runtime::Manager::get("CPU");
     auto external = manager->compile(f);
     auto backend = manager->allocate_backend();
     auto cf = backend->make_call_frame(external);
@@ -116,7 +115,7 @@ TEST(cpu, abc_int64)
 }

 // Same as abc, but using tuples for input and output
-TEST(execute, abc_tuple)
+TEST(cpu, abc_tuple)
 {
     auto shape = Shape{2, 2};
@@ -131,7 +130,7 @@ TEST(execute, abc_tuple)
     auto f = make_shared<Function>(
         make_shared<op::Tuple>(Nodes{(A + B) * C}), tensor_view_type, op::Parameters{ABC});
-    auto manager = runtime::Manager::get("NGVM");
+    auto manager = runtime::Manager::get("CPU");
     auto external = manager->compile(f);
     auto backend = manager->allocate_backend();
     auto cf = backend->make_call_frame(external);
@@ -160,7 +159,7 @@ TEST(execute, abc_tuple)
 }

 // Same as abc, but using tuples for input and output
-TEST(execute, abc_tuple_int64)
+TEST(cpu, abc_tuple_int64)
 {
     auto shape = Shape{2, 2};
@@ -175,7 +174,7 @@ TEST(execute, abc_tuple_int64)
     auto f = make_shared<Function>(
         make_shared<op::Tuple>(Nodes{(A + B) * C}), tensor_view_type, op::Parameters{ABC});
-    auto manager = runtime::Manager::get("NGVM");
+    auto manager = runtime::Manager::get("CPU");
     auto external = manager->compile(f);
     auto backend = manager->allocate_backend();
     auto cf = backend->make_call_frame(external);
@@ -203,6 +202,7 @@ TEST(execute, abc_tuple_int64)
     ASSERT_EQ((vector<element::Int64::type>{50, 72, 98, 128}), result->get_vector());
 }
+/*
 // Multiple retrieve values
 TEST(execute, tuple_result)
 {
......