Commit 5be99c0a authored by Nishant Patel's avatar Nishant Patel Committed by Scott Cyphers

Use mkldnn reorder only for transpose/dimshuffles. (#1188)

* Usage of mkldnn reshape updated

* update reshape condition for mkldnn

* Add a test case and order in which conditions are checked
parent e07637c0
...@@ -554,12 +554,26 @@ namespace ngraph ...@@ -554,12 +554,26 @@ namespace ngraph
void CPUAssignment::ASSIGN_DECL(ngraph::op::Reshape) void CPUAssignment::ASSIGN_DECL(ngraph::op::Reshape)
{ {
auto reshape = static_cast<op::Reshape*>(node); auto reshape = static_cast<op::Reshape*>(node);
auto arg0_shape = node->get_input_shape(0);
auto result_shape = node->get_output_shape(0);
auto axis_order = reshape->get_input_order();
bool flag = true;
// Use Eigen for 3D // Use Eigen for 3D
if (node->get_input_element_type(0) == element::f32 && if (node->get_input_element_type(0) == element::f32 &&
node->get_input_shape(0).size() < TENSOR_MAX_DIMS && arg0_shape.size() < TENSOR_MAX_DIMS && arg0_shape.size() > 3 &&
node->get_input_shape(0).size() > 3 && arg0_shape.size() == result_shape.size())
node->get_input_shape(0).size() == node->get_output_shape(0).size()) {
for (size_t i = 0; i < axis_order.size(); i++)
{
if (arg0_shape[axis_order[i]] != result_shape[i])
{
flag = false;
break;
}
}
if (flag)
{ {
auto op_annotations = auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>(); std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
...@@ -567,6 +581,7 @@ namespace ngraph ...@@ -567,6 +581,7 @@ namespace ngraph
reshape->set_op_annotations(op_annotations); reshape->set_op_annotations(op_annotations);
} }
} }
}
template <> template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::BatchNorm) void CPUAssignment::ASSIGN_DECL(ngraph::op::BatchNorm)
......
...@@ -2545,6 +2545,31 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_transpose) ...@@ -2545,6 +2545,31 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_transpose)
read_vector<float>(result)); read_vector<float>(result));
} }
NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_no_transpose)
{
    // A reshape with the identity axis order {0,1,2,3} only reinterprets the
    // shape; it must not move any data, so the output buffer should match the
    // input buffer exactly even though the target shape differs.
    Shape shape_a{2, 2, 5, 5};
    Shape shape_r{2, 5, 5, 2};

    // Fill the input with the sequence 1..100 in row-major order.
    vector<float> input_values(2 * 2 * 5 * 5);
    for (size_t i = 0; i < input_values.size(); i++)
    {
        input_values[i] = static_cast<float>(i) + 1.0f;
    }

    auto param = make_shared<op::Parameter>(element::f32, shape_a);
    auto reshape_op = make_shared<op::Reshape>(param, AxisVector{0, 1, 2, 3}, shape_r);
    auto f = make_shared<Function>(reshape_op, op::ParameterVector{param});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto input_tensor = backend->create_tensor(element::f32, shape_a);
    copy_data(input_tensor, input_values);
    auto result = backend->create_tensor(element::f32, shape_r);

    backend->call(f, {result}, {input_tensor});
    // Element order is unchanged, so the result equals the input verbatim.
    EXPECT_EQ(input_values, read_vector<float>(result));
}
// //
// Numpy: // Numpy:
// //
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment