Commit 4df55e63 authored by Diego Caballero's avatar Diego Caballero Committed by nmostafa

[MLIR] Enable multi-node sub-graph (#34)

It introduces end-to-end support for sub-graphs with multiple nodes. It
adds a test with a dot+add ops.
Co-authored-by: Diego Caballero <diego.caballero@intel.com>
Co-authored-by: Nagy Mostafa <nagy.mostafa@gmail.com>
parent 209e3ccc
......@@ -240,23 +240,22 @@ void MLIRCompiler::optimize()
// Lowers the sub-graph captured in the CompiledKernel into the nGraph MLIR
// dialect. Each node in the sub-graph is dispatched to its registered op
// builder; resulting MLIR values are recorded in the tensor->value map so
// that subsequent nodes can resolve their operands.
//
// Throws unsupported_op if a node has no entry in op_dispatcher.
void MLIRCompiler::build_ng_dialect()
{
    const NodeVector& sub_graph = m_compiled_kernel->get_node_list();

    // Multi-node sub-graphs are supported: visit nodes in list order, which
    // is assumed to be a topological order so operands are defined before use.
    for (auto np : sub_graph)
    {
        auto it = op_dispatcher.find(TI(*np));
        if (it == op_dispatcher.end())
        {
            throw unsupported_op{std::string{"The MLIR backend doesn't currently implement the '"} +
                                 np->description() + "' operation"};
        }
        // Invoke the per-op builder to emit the MLIR operation for this node.
        mlir::Value* mlir_value = it->second(*this, np.get());
        // Builders that have multiple result values update the value map
        // themselves and return null; single-result builders return the value.
        if (mlir_value)
        {
            update_tensor_value(np->get_output_tensor_ptr().get(), mlir_value);
        }
    }
    create_return();
}
......
......@@ -57,6 +57,36 @@ NGRAPH_TEST(${BACKEND_NAME}, add)
(test::NDArray<float, 2>({{6, 8}, {10, 12}})).get_vector()));
}
// End-to-end check for a two-node sub-graph: Dot followed by Add.
// Computes f(P0, P1, P2) = (P0 . P1) + P2 and compares against the
// hand-computed expected result.
NGRAPH_TEST(${BACKEND_NAME}, dot_add)
{
    Shape lhs_shape{2, 3};
    Shape rhs_shape{3, 3};
    Shape out_shape{2, 3};

    // Build the function graph: dot feeding into add.
    auto p0 = make_shared<op::Parameter>(element::f32, lhs_shape);
    auto p1 = make_shared<op::Parameter>(element::f32, rhs_shape);
    auto prod = make_shared<op::Dot>(p0, p1);
    auto p2 = make_shared<op::Parameter>(element::f32, out_shape);
    auto sum = make_shared<op::Add>(prod, p2);
    auto func = make_shared<Function>(sum, ParameterVector{p0, p1, p2});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Allocate input/output tensors and populate the inputs.
    auto t0 = backend->create_tensor(element::f32, lhs_shape);
    auto t1 = backend->create_tensor(element::f32, rhs_shape);
    auto t2 = backend->create_tensor(element::f32, out_shape);
    auto t_result = backend->create_tensor(element::f32, out_shape);
    copy_data(t0, vector<float>{1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
    copy_data(t1, vector<float>{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f});
    copy_data(t2, vector<float>{5.f, 4.f, 3.f, 2.f, 1.f, 0.f});

    auto handle = backend->compile(func);
    handle->call_with_validate({t_result}, {t0, t1, t2});

    // Expected: dot = {{30,36,42},{66,81,96}}; plus bias = {{35,40,45},{68,82,96}}.
    EXPECT_TRUE(test::all_close_f(read_vector<float>(t_result),
                                  vector<float>{35.f, 40.f, 45.f, 68.f, 82.f, 96.f}));
}
NGRAPH_TEST(${BACKEND_NAME}, add_overload)
{
Shape shape{2, 2};
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment