Commit 22e783ff authored by Adam Procter, committed by Robert Kimball

Fix two bugs with concat for 0-size tensors (#1120)

* Fix bug with concat for 0-size tensors

* Simplify test for zero-length axes, per PR comments
parent 9441ea0c
@@ -138,6 +138,11 @@ void ngraph::runtime::cpu::kernel::emit_concat(codegen::CodeWriter& writer,
     for (size_t i = 0; i < args.size(); i++)
     {
+        if (shape_size(in_shapes[i]) == 0)
+        {
+            continue;
+        }
+
         Coordinate out_start_coord = Coordinate(out_shape.size(), 0);
         out_start_coord[concatenation_axis] = concatenation_pos;
......
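For context, `shape_size` in nGraph is the product of a shape's dimensions, so a tensor with any zero-length axis has zero elements overall, which is exactly what the new guard tests. A minimal standalone sketch of the skip pattern (plain C++; the local `shape_size` and `concat_1d` are illustrative stand-ins, not the generated kernel):

```cpp
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// Product of the dimensions: zero if any axis has length zero.
size_t shape_size(const std::vector<size_t>& shape)
{
    return std::accumulate(shape.begin(), shape.end(), size_t{1},
                           std::multiplies<size_t>());
}

// 1-D concat that skips zero-element inputs, mirroring the kernel fix.
std::vector<float> concat_1d(const std::vector<std::vector<float>>& inputs)
{
    std::vector<float> out;
    for (const auto& in : inputs)
    {
        if (in.empty()) // shape_size(in_shapes[i]) == 0 in the real kernel
        {
            continue;
        }
        out.insert(out.end(), in.begin(), in.end());
    }
    return out;
}
```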
@@ -87,10 +87,26 @@ namespace ngraph
                     ((node->get_input_shape(0)).size() == 4 ||
                      (node->get_input_shape(0)).size() == 2))
                 {
-                    auto op_annotations =
-                        std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
-                    op_annotations->set_mkldnn_op(true);
-                    concat->set_op_annotations(op_annotations);
+                    // MKLDNN seems to throw an exception when given tensors with
+                    // 0-length dimensions, so don't assign it in such cases.
+                    bool any_zero = false;
+
+                    for (size_t i = 0; i < node->get_input_size(); i++)
+                    {
+                        if (shape_size(node->get_input_shape(i)) == 0)
+                        {
+                            any_zero = true;
+                            break;
+                        }
+                    }
+
+                    if (!any_zero)
+                    {
+                        auto op_annotations =
+                            std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
+                        op_annotations->set_mkldnn_op(true);
+                        concat->set_op_annotations(op_annotations);
+                    }
                 }
             }
......
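Rather than teaching MKLDNN about zero-length dimensions, the pass now simply falls back to the non-MKLDNN concat kernel whenever any input is empty. The same predicate, written as a standalone helper for illustration (the name `has_zero_element_input` and the `Shape` alias are hypothetical, not part of the pass):

```cpp
#include <algorithm>
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

using Shape = std::vector<size_t>; // stand-in for ngraph::Shape

// True if any input shape describes a zero-element tensor; the pass
// declines the MKLDNN path for Concat in that case.
bool has_zero_element_input(const std::vector<Shape>& input_shapes)
{
    return std::any_of(input_shapes.begin(), input_shapes.end(),
                       [](const Shape& s) {
                           return std::accumulate(s.begin(), s.end(), size_t{1},
                                                  std::multiplies<size_t>()) == 0;
                       });
}
```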
@@ -39,6 +39,13 @@ namespace ngraph
                 for (size_t i = 0; i < args.size(); i++)
                 {
+                    // CoordinateTransform gets confused when the last input has a
+                    // zero-size dim, so just skip zero-element tensors entirely.
+                    if (shape_size(in_shapes[i]) == 0)
+                    {
+                        continue;
+                    }
+
                     // The start coordinate for the copy is (0,...,0) except at the concatenation axis.
                     Coordinate out_start_coord(out_shape.size(), 0);
                     out_start_coord[concatenation_axis] = concatenation_pos;
......
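Skipping an empty input here is semantically safe, not just a workaround: concat requires all non-axis dimensions to agree, so a zero-length axis is either the concatenation axis itself (where the skipped input would advance the output offset by zero anyway) or some other axis (in which case every input and the output share it, and the output has no elements at all). A sketch of the offset bookkeeping under that reasoning (illustrative helper, not the reference kernel):

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

using Shape = std::vector<size_t>; // stand-in for ngraph::Shape

// Start offsets along the concatenation axis for each non-empty input;
// SIZE_MAX marks inputs skipped as zero-element, as in the fixed kernel.
std::vector<size_t> slice_offsets(const std::vector<Shape>& in_shapes,
                                  size_t concatenation_axis)
{
    std::vector<size_t> offsets;
    size_t pos = 0;
    for (const auto& s : in_shapes)
    {
        size_t n = std::accumulate(s.begin(), s.end(), size_t{1},
                                   std::multiplies<size_t>());
        if (n == 0)
        {
            offsets.push_back(SIZE_MAX); // nothing to copy; offset unchanged
            continue;
        }
        offsets.push_back(pos);
        pos += s[concatenation_axis];
    }
    return offsets;
}
```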
@@ -668,6 +668,100 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_5d)
                   read_vector<float>(result));
 }
 
+NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_last)
+{
+    Shape shape_a{4};
+    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    Shape shape_b{0};
+    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    Shape shape_r{4};
+
+    auto r = make_shared<op::Concat>(NodeVector{A, B}, 0);
+    auto f = make_shared<Function>(r, op::ParameterVector{A, B});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    vector<float> a_data{1, 2, 3, 4};
+    vector<float> b_data(0);
+
+    auto a = backend->create_tensor(element::f32, shape_a);
+    copy_data(a, a_data);
+    auto b = backend->create_tensor(element::f32, shape_b);
+    copy_data(b, b_data);
+    auto result = backend->create_tensor(element::f32, shape_r);
+
+    backend->call(f, {result}, {a, b});
+    EXPECT_EQ((vector<float>{1, 2, 3, 4}), read_vector<float>(result));
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_middle)
+{
+    Shape shape_a{4};
+    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    Shape shape_b{0};
+    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    Shape shape_c{4};
+    auto C = make_shared<op::Parameter>(element::f32, shape_c);
+    Shape shape_r{8};
+
+    auto r = make_shared<op::Concat>(NodeVector{A, B, C}, 0);
+    auto f = make_shared<Function>(r, op::ParameterVector{A, B, C});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    vector<float> a_data{1, 2, 3, 4};
+    vector<float> b_data(0);
+    vector<float> c_data{5, 6, 7, 8};
+
+    auto a = backend->create_tensor(element::f32, shape_a);
+    copy_data(a, a_data);
+    auto b = backend->create_tensor(element::f32, shape_b);
+    copy_data(b, b_data);
+    auto c = backend->create_tensor(element::f32, shape_c);
+    copy_data(c, c_data);
+    auto result = backend->create_tensor(element::f32, shape_r);
+
+    backend->call(f, {result}, {a, b, c});
+    EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), read_vector<float>(result));
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_4d_middle)
+{
+    Shape shape_a{2, 2, 1, 1};
+    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    Shape shape_b{2, 2, 0, 1};
+    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    Shape shape_c{2, 2, 1, 1};
+    auto C = make_shared<op::Parameter>(element::f32, shape_c);
+    Shape shape_r{2, 2, 2, 1};
+
+    auto r = make_shared<op::Concat>(NodeVector{A, B, C}, 2);
+    auto f = make_shared<Function>(r, op::ParameterVector{A, B, C});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    vector<float> a_data{1, 2, 3, 4};
+    vector<float> b_data(0);
+    vector<float> c_data{5, 6, 7, 8};
+
+    auto a = backend->create_tensor(element::f32, shape_a);
+    copy_data(a, a_data);
+    auto b = backend->create_tensor(element::f32, shape_b);
+    copy_data(b, b_data);
+    auto c = backend->create_tensor(element::f32, shape_c);
+    copy_data(c, c_data);
+    auto result = backend->create_tensor(element::f32, shape_r);
+
+    backend->call(f, {result}, {a, b, c});
+    EXPECT_EQ((vector<float>{1, 5, 2, 6, 3, 7, 4, 8}), read_vector<float>(result));
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, divide)
 {
     Shape shape{2, 2};
......
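The expected vector in `concat_zero_length_4d_middle` is worth unpacking: concatenating shapes {2, 2, 1, 1}, {2, 2, 0, 1}, and {2, 2, 1, 1} along axis 2 yields {2, 2, 2, 1}, and in row-major order each of the four (n, c) positions takes one element from A followed by one from C, with B contributing nothing. A quick standalone check of that interleaving (illustrative, not part of the test suite):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<float> a{1, 2, 3, 4}; // shape {2, 2, 1, 1}, row-major
    std::vector<float> c{5, 6, 7, 8}; // shape {2, 2, 1, 1}, row-major

    // Output shape {2, 2, 2, 1}: at each of the four (n, c) positions,
    // axis 2 holds a's element then c's; the empty b contributes nothing.
    std::vector<float> expected;
    for (std::size_t i = 0; i < 4; i++)
    {
        expected.push_back(a[i]);
        expected.push_back(c[i]);
    }
    assert((expected == std::vector<float>{1, 5, 2, 6, 3, 7, 4, 8}));
    return 0;
}
```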