Commit 5be99c0a authored by Nishant Patel's avatar Nishant Patel Committed by Scott Cyphers

Use mkldnn reorder only for transpose/dimshuffles. (#1188)

* Usage of mkldnn reshape updated

* update reshape condition for mkldnn

* Add a test case and order in which conditions are checked
parent e07637c0
......@@ -554,17 +554,32 @@ namespace ngraph
// Decide whether this Reshape should be executed via an MKLDNN reorder.
// MKLDNN reorders are only correct when the op is a pure transpose /
// dimshuffle, i.e. the output shape is exactly the input shape permuted
// by the input axis order. General reshapes (which reinterpret the
// row-major buffer with different extents) fall back to the default
// (Eigen) implementation.
// NOTE(review): the diff capture duplicated the pre- and post-commit
// condition and annotation code; this body is the reconstructed
// post-commit version.
void CPUAssignment::ASSIGN_DECL(ngraph::op::Reshape)
{
    auto reshape = static_cast<op::Reshape*>(node);
    auto arg0_shape = node->get_input_shape(0);
    auto result_shape = node->get_output_shape(0);
    auto axis_order = reshape->get_input_order();
    bool flag = true;
    // Use Eigen for 3D and below; MKLDNN only for f32 tensors of rank
    // 4..TENSOR_MAX_DIMS-1 whose rank is unchanged by the reshape.
    if (node->get_input_element_type(0) == element::f32 &&
        arg0_shape.size() < TENSOR_MAX_DIMS && arg0_shape.size() > 3 &&
        arg0_shape.size() == result_shape.size())
    {
        // A dimshuffle requires result_shape[i] == arg0_shape[axis_order[i]]
        // for every output axis; any mismatch means this is a general
        // reshape and MKLDNN must not be used.
        for (size_t i = 0; i < axis_order.size(); i++)
        {
            if (arg0_shape[axis_order[i]] != result_shape[i])
            {
                flag = false;
                break;
            }
        }
        if (flag)
        {
            auto op_annotations =
                std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
            op_annotations->set_mkldnn_op(true);
            reshape->set_op_annotations(op_annotations);
        }
    }
}
......
......@@ -2545,6 +2545,31 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_transpose)
read_vector<float>(result));
}
// Reshape with the identity axis order {0,1,2,3} but a permuted output
// shape: this is NOT a transpose, so the backend must reinterpret the
// row-major buffer unchanged and the output must equal the input data.
NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_no_transpose)
{
    Shape shape_a{2, 2, 5, 5};
    Shape shape_r{2, 5, 5, 2};

    // Fill the input with 1..100 so element ordering is observable.
    vector<float> input_values(2 * 2 * 5 * 5);
    for (size_t idx = 0; idx < input_values.size(); ++idx)
    {
        input_values[idx] = static_cast<float>(idx + 1);
    }

    auto param = make_shared<op::Parameter>(element::f32, shape_a);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0, 1, 2, 3}, shape_r);
    auto func = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto input_tensor = backend->create_tensor(element::f32, shape_a);
    copy_data(input_tensor, input_values);
    auto output_tensor = backend->create_tensor(element::f32, shape_r);

    backend->call(func, {output_tensor}, {input_tensor});
    EXPECT_EQ(input_values, read_vector<float>(output_tensor));
}
//
// Numpy:
//
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment