Commit efcfab75 authored by Jaikrishnan Menon

CPU: Implement op::Reshape

parent eea59f20
......@@ -25,6 +25,7 @@
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/get_tuple_element.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/runtime/cpu/emitter.hpp"
#include "ngraph/runtime/cpu/external_function.hpp"
#include "ngraph/runtime/tensor_view_info.hpp"
......@@ -931,3 +932,68 @@ void Emitter::EMITTER_DECL(EmitConstant)
    TU += "};\n }\n";
}

void Emitter::EMITTER_DECL(EmitReshape)
{
    auto reshape = static_cast<const op::Reshape*>(n);

    auto arg_type = reshape->get_arguments().at(0)->get_value_type();
    auto arg_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_type);
    assert(arg_tensor_view_type);
    auto arg_shape = arg_tensor_view_type->get_shape();
    auto arg_rank = arg_shape.size();

    auto result_type = reshape->get_value_type();
    auto result_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(result_type);
    assert(result_tensor_view_type);
    auto result_shape = result_tensor_view_type->get_shape();
    auto& result_element_type = result_tensor_view_type->get_element_type();

    // input_order is a permutation of the input axes; it leaves the layout
    // unchanged exactly when it is the identity permutation, i.e. when sorted.
    auto input_order = reshape->get_input_order();
    bool same_layout = std::is_sorted(input_order.begin(), input_order.end());

    size_t result_shape_product = 1;
    for (auto i : result_shape)
    {
        result_shape_product *= i;
    }

    // If there is no layout change, or we are just going from 1^n to 1^m or to a
    // zero-size tensor (fewer than two elements), we can just copy.
    if (same_layout || result_shape_product < 2)
    {
        TU += " {\n"
              " call_frame->get_parameterized_tensor_view<" +
              element_type_names[TI(result_element_type)] + ">(" +
              to_string(outputs.at(0).get_index()) +
              ")->get_vector() =\n"
              " call_frame->get_parameterized_tensor_view<" +
              element_type_names[TI(result_element_type)] + ">(" +
              to_string(inputs.at(0).get_index()) +
              ")->get_vector();\n"
              " }\n";
    }
    // If there *is* a layout change in the 2D case, we transpose the input.
    else if (arg_rank == 2)
    {
        auto arg0_layout = inputs[0].get_layout<DenseTensorViewLayout>();
        auto out_layout = outputs[0].get_layout<DenseTensorViewLayout>();

        TU += " {\n"
              " auto arg0 = call_frame->get_tensor_view_data<" +
              element_type_names[TI(result_element_type)] + ">(" +
              to_string(inputs[0].get_index()) + ");\n"
              " auto out = call_frame->get_tensor_view_data<" +
              element_type_names[TI(result_element_type)] + ">(" +
              to_string(outputs[0].get_index()) + ");\n"
              " EigenMatrix<" + element_type_names[TI(result_element_type)] + ">(out, " +
              EIGEN_MATRIX_FORMAT(out_layout->get_shape(), out_layout->get_strides()) + ") =\n"
              " EigenMatrix<" + element_type_names[TI(result_element_type)] + ">(arg0, " +
              EIGEN_MATRIX_FORMAT(arg0_layout->get_shape(), arg0_layout->get_strides()) + ").transpose();\n"
              " }\n";
    }
    // Other cases (reordering of axes for tensors with rank > 2) are not handled yet.
    else
    {
        throw ngraph_error(
            "Axis permutation in reshape is not implemented yet for tensors with rank>2");
    }
}
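For reference, the copy branch above pastes a block like the following into the translation unit. This is a hypothetical instantiation for illustration only: it assumes element_type_names[] maps the element type to "element::Float32" and that the output and input tensor views happen to sit at indices 1 and 0.

    {
        // hypothetical emitted code: same layout, so a straight vector copy
        call_frame->get_parameterized_tensor_view<element::Float32>(1)->get_vector() =
            call_frame->get_parameterized_tensor_view<element::Float32>(0)->get_vector();
    }

The 2D branch emits the same kind of block, but wraps both buffers in EigenMatrix views (with EIGEN_MATRIX_FORMAT supplying the layout's shape and strides) and assigns the .transpose() of the input matrix to the output.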
......@@ -76,6 +76,7 @@ namespace ngraph
                void EMITTER_DECL(EmitBroadcast);
                void EMITTER_DECL(EmitConvert);
                void EMITTER_DECL(EmitConstant);
                void EMITTER_DECL(EmitReshape);
            };
        }
    }
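EMITTER_DECL presumably expands each of these names into the common emitter signature; the bodies above reference n, inputs, and outputs, so a hypothetical expansion might read:

    // Hypothetical expansion of EMITTER_DECL(EmitReshape), for illustration only;
    // the real macro is defined elsewhere and its parameter list may differ.
    // void EmitReshape(const ngraph::Node* n,
    //                  ExternalFunction* ef,
    //                  const std::vector<TensorViewInfo>& inputs,
    //                  const std::vector<TensorViewInfo>& outputs);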
......
......@@ -47,6 +47,7 @@
#include "ngraph/ops/negative.hpp"
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/subtract.hpp"
#include "ngraph/ops/tuple.hpp"
......@@ -106,6 +107,7 @@ static const OpMap dispatcher{
    {TI(ngraph::op::Broadcast), &Emitter::EmitBroadcast},
    {TI(ngraph::op::Convert), &Emitter::EmitConvert},
    {TI(ngraph::op::Constant), &Emitter::EmitConstant},
    {TI(ngraph::op::Reshape), &Emitter::EmitReshape},
};
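The dispatcher keys each op's type index (the TI macro) to an Emitter member-function pointer. The lookup-and-invoke site is outside this hunk, but it would look roughly like this sketch (names and error text are assumptions, not this file's actual code):

    // hypothetical dispatch sketch: resolve the handler by the node's dynamic type
    auto handler = dispatcher.find(type_index(typeid(*node)));
    if (handler == dispatcher.end())
    {
        throw ngraph_error("Unhandled op in CPU emitter");
    }
    // invoke the member-function pointer on the emitter instance;
    // (emitter.*(handler->second))(/* arguments as per EMITTER_DECL */);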
#undef TI
......
......@@ -1655,8 +1655,9 @@ TEST(execute, reduce_matrix_to_scalar_zero_by_zero)
    ASSERT_EQ((vector<float>{}), a->get_vector());
    ASSERT_EQ((vector<float>{99}), b->get_vector());
}
*/

TEST(execute, reshape_t2v_012)
TEST(cpu, reshape_t2v_012)
{
    auto shape_a = Shape{2, 2, 3};
    auto A = make_shared<op::Parameter>(
......@@ -1666,7 +1667,7 @@ TEST(execute, reshape_t2v_012)
    auto r = make_shared<op::Reshape>(A, AxisVector{0, 1, 2}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1680,7 +1681,7 @@ TEST(execute, reshape_t2v_012)
    ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}), result->get_vector());
}
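Every reshape test below repeats the pipeline shown above: fetch the "CPU" runtime manager, compile the Function, allocate a backend, and build a call frame; only the shapes, axis orders, and expected vectors vary. In outline (the final invocation is elided from these hunks, so it appears here as an assumed call):

    auto manager = runtime::Manager::get("CPU");  // select the CPU backend
    auto external = manager->compile(f);          // compile the Function
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external); // executable call frame
    // (*cf)({a}, {result});                      // hypothetical invocation with tensor views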
TEST(execute, reshape_t2s_012)
TEST(cpu, reshape_t2s_012)
{
    auto shape_a = Shape{1, 1, 1};
    auto A = make_shared<op::Parameter>(
......@@ -1690,7 +1691,7 @@ TEST(execute, reshape_t2s_012)
    auto r = make_shared<op::Reshape>(A, AxisVector{0, 1, 2}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1704,7 +1705,7 @@ TEST(execute, reshape_t2s_012)
    ASSERT_EQ((vector<float>{6}), result->get_vector());
}

TEST(execute, reshape_t2s_120)
TEST(cpu, reshape_t2s_120)
{
    auto shape_a = Shape{1, 1, 1};
    auto A = make_shared<op::Parameter>(
......@@ -1714,7 +1715,7 @@ TEST(execute, reshape_t2s_120)
    auto r = make_shared<op::Reshape>(A, AxisVector{1, 2, 0}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1728,7 +1729,7 @@ TEST(execute, reshape_t2s_120)
    ASSERT_EQ((vector<float>{6}), result->get_vector());
}

TEST(execute, reshape_s2t)
TEST(cpu, reshape_s2t)
{
    auto shape_a = Shape{};
    auto A = make_shared<op::Parameter>(
......@@ -1738,7 +1739,7 @@ TEST(execute, reshape_s2t)
    auto r = make_shared<op::Reshape>(A, AxisVector{}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1752,7 +1753,7 @@ TEST(execute, reshape_s2t)
    ASSERT_EQ((vector<float>{42}), result->get_vector());
}

TEST(execute, reshape_v2m_col)
TEST(cpu, reshape_v2m_col)
{
    auto shape_a = Shape{3};
    auto A = make_shared<op::Parameter>(
......@@ -1762,7 +1763,7 @@ TEST(execute, reshape_v2m_col)
    auto r = make_shared<op::Reshape>(A, AxisVector{0}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1776,7 +1777,7 @@ TEST(execute, reshape_v2m_col)
    ASSERT_EQ((vector<float>{1, 2, 3}), result->get_vector());
}

TEST(execute, reshape_v2m_row)
TEST(cpu, reshape_v2m_row)
{
    auto shape_a = Shape{3};
    auto A = make_shared<op::Parameter>(
......@@ -1786,7 +1787,7 @@ TEST(execute, reshape_v2m_row)
    auto r = make_shared<op::Reshape>(A, AxisVector{0}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1800,7 +1801,7 @@ TEST(execute, reshape_v2m_row)
    ASSERT_EQ((vector<float>{1, 2, 3}), result->get_vector());
}

TEST(execute, reshape_v2t_middle)
TEST(cpu, reshape_v2t_middle)
{
    auto shape_a = Shape{3};
    auto A = make_shared<op::Parameter>(
......@@ -1810,7 +1811,7 @@ TEST(execute, reshape_v2t_middle)
    auto r = make_shared<op::Reshape>(A, AxisVector{0}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1824,7 +1825,7 @@ TEST(execute, reshape_v2t_middle)
    ASSERT_EQ((vector<float>{1, 2, 3}), result->get_vector());
}

TEST(execute, reshape_m2m_same)
TEST(cpu, reshape_m2m_same)
{
    auto shape_a = Shape{3, 3};
    auto A = make_shared<op::Parameter>(
......@@ -1834,7 +1835,7 @@ TEST(execute, reshape_m2m_same)
    auto r = make_shared<op::Reshape>(A, AxisVector{0, 1}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1848,7 +1849,7 @@ TEST(execute, reshape_m2m_same)
    ASSERT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9}), result->get_vector());
}

TEST(execute, reshape_m2m_transpose)
TEST(cpu, reshape_m2m_transpose)
{
    auto shape_a = Shape{3, 3};
    auto A = make_shared<op::Parameter>(
......@@ -1858,7 +1859,7 @@ TEST(execute, reshape_m2m_transpose)
    auto r = make_shared<op::Reshape>(A, AxisVector{1, 0}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1872,7 +1873,7 @@ TEST(execute, reshape_m2m_transpose)
    ASSERT_EQ((vector<float>{1, 4, 7, 2, 5, 8, 3, 6, 9}), result->get_vector());
}

TEST(execute, reshape_m2m_dim_change_transpose)
TEST(cpu, reshape_m2m_dim_change_transpose)
{
    auto shape_a = Shape{3, 2};
    auto A = make_shared<op::Parameter>(
......@@ -1882,7 +1883,7 @@ TEST(execute, reshape_m2m_dim_change_transpose)
    auto r = make_shared<op::Reshape>(A, AxisVector{1, 0}, shape_r);
    auto f = make_shared<Function>(r, rt, op::Parameters{A});

    auto manager = runtime::Manager::get("NGVM");
    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);
......@@ -1896,6 +1897,7 @@ TEST(execute, reshape_m2m_dim_change_transpose)
    ASSERT_EQ((vector<float>{1, 3, 5, 2, 4, 6}), result->get_vector());
}

/*
TEST(execute, sin)
{
    auto shape = Shape{6};
......