Unverified Commit c4bcabac authored by Mateusz Bencer, committed by GitHub

[ONNX] Add dynamic shapes support for expand op (#4379)

* [ONNX] Add new ConstantOfShape operator

* Fix a bug in op implementation

* Modify downgrade pass to support broadcast scalars

* Style-fix

* Use at instead of []

* Use onnx helper instead of ngraph builder

* Add some UT

* Style fix

* Limit range of DynBroadcast in downgrade pass

* Move tests to better location

* Rewrite tests to test_case

* Add check if arg_pshape is static

* Added unit tests

* finished impl, added tests

* Added missing EOF

* Make tests fully dynamic

* disabled tests for gpu

* Changed file names

* styles applied

* code review remarks introduced
Co-authored-by: Tomasz Socha <tomasz.socha@intel.com>
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
parent c3e8f560
@@ -14,20 +14,13 @@
// limitations under the License.
//*****************************************************************************
#include <cstddef>
#include <cstdint>
#include <memory>
#include "default_opset.hpp"
#include "expand.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_reshape.hpp"
#include "ngraph/op/experimental/range.hpp"
#include "ngraph/op/experimental/shape_of.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/multiply.hpp"
namespace ngraph
{
@@ -42,18 +35,11 @@ namespace ngraph
const std::shared_ptr<ngraph::Node> data{node.get_ng_inputs().at(0)};
const std::shared_ptr<ngraph::Node> shape{node.get_ng_inputs().at(1)};
NGRAPH_CHECK(shape->is_constant(),
"Ngraph does not support dynamic braodcasting for Expand op.");
const auto const_filled_with_ones = std::make_shared<default_opset::Broadcast>(
default_opset::Constant::create(data->get_element_type(), {}, {1}), shape);
std::vector<std::size_t> shape_vector =
ngraph::as_type_ptr<default_opset::Constant>(shape)
->get_vector<std::size_t>();
const ngraph::Shape shape_shape{shape_vector};
return {std::make_shared<default_opset::Broadcast>(
data,
default_opset::Constant::create(
element::i64, Shape{shape_shape.size()}, shape_shape))};
return {
std::make_shared<default_opset::Multiply>(data, const_filled_with_ones)};
}
} // namespace set_1
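For reference, here is a minimal standalone sketch (plain C++, not nGraph API, and not part of this commit) of the numpy-style broadcasting that the new importer code relies on: broadcasting a scalar 1 to the requested shape and multiplying the data by it reproduces Expand, because Multiply applies exactly these broadcasting rules to its operands.

// Standalone illustration of numpy-style broadcasting, the semantics behind
// Broadcast(Constant(1), shape) followed by Multiply in the importer above.
// All names here are illustrative only.
#include <cassert>
#include <cstddef>
#include <vector>

using Shape = std::vector<std::size_t>;

// Broadcast `data` (laid out row-major with shape `in`) to shape `out`.
// Assumes `in`, right-aligned against `out`, has rank <= `out`'s rank and
// every dimension either equal to the matching output dimension or equal
// to 1, as Expand requires.
std::vector<float> broadcast_to(const std::vector<float>& data, Shape in, const Shape& out)
{
    in.insert(in.begin(), out.size() - in.size(), 1); // left-pad the rank with 1s

    std::size_t out_size = 1;
    for (auto d : out)
        out_size *= d;

    // Row-major strides of the padded input; broadcast dimensions get stride 0.
    std::vector<std::size_t> stride(in.size());
    std::size_t s = 1;
    for (std::size_t i = in.size(); i-- > 0;)
    {
        stride[i] = (in[i] == 1) ? 0 : s;
        s *= in[i];
    }

    std::vector<float> result(out_size);
    for (std::size_t flat = 0; flat < out_size; ++flat)
    {
        // Decompose the flat output index into coordinates and map them back
        // to an offset in the (smaller) input buffer.
        std::size_t rem = flat, src = 0;
        for (std::size_t i = out.size(); i-- > 0;)
        {
            src += (rem % out[i]) * stride[i];
            rem /= out[i];
        }
        result[flat] = data[src];
    }
    return result;
}

int main()
{
    // Same case as the expand_1_dyn_shape test below: {3, 1} -> {2, 3, 6}.
    const auto expanded = broadcast_to({1.f, 2.f, 3.f}, {3, 1}, {2, 3, 6});
    assert(expanded.size() == 36);
    assert(expanded[0] == 1.f && expanded[6] == 2.f && expanded[12] == 3.f);
    // Multiplying `data` element-wise by broadcast_to({1.f}, {1}, {2, 3, 6})
    // yields the same values, which is exactly the graph the importer builds.
}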
@@ -258,6 +258,13 @@ model_split_equal_parts_default
model_split_equal_parts_2d
model_split_variable_parts_2d
model_expand_static_shape
expand_1_dyn_shape
expand_2_dyn_shape
expand_3_dyn_shape
expand_4_dyn_shape
expand_5_dyn_shape
expand_6_dyn_shape
expand_uint16_dyn_shape
model_conv2d_strides_padding
model_conv2d_strides_no_padding
model_conv2d_strides_assymetric_padding
ir_version: 4
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "data"
input: "shape"
output: "expanded"
name: "expand_1"
op_type: "Expand"
}
name: "expand test"
input {
name: "data"
type {
tensor_type {
elem_type: 1
}
}
}
input {
name: "shape"
type {
tensor_type {
elem_type: 7
}
}
}
output {
name: "expanded"
type {
tensor_type {
elem_type: 1
}
}
}
}
opset_import {
version: 1
}

ir_version: 4
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "data"
input: "shape"
output: "expanded"
name: "expand_1"
op_type: "Expand"
}
name: "expand test"
input {
name: "data"
type {
tensor_type {
elem_type: 4
}
}
}
input {
name: "shape"
type {
tensor_type {
elem_type: 7
}
}
}
output {
name: "expanded"
type {
tensor_type {
elem_type: 4
}
}
}
}
opset_import {
version: 1
}
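For readers decoding the two model files above: the elem_type values follow ONNX's TensorProto.DataType numbering, and the absence of a shape field inside tensor_type is what leaves the inputs fully dynamic. A small illustrative mapping of the codes used here (an assumed helper, not importer code):

// Illustrative decoding of the elem_type codes that appear in the models above,
// following ONNX's TensorProto.DataType numbering. Not importer code.
#include <cstdint>
#include <string>

std::string decode_onnx_elem_type(std::int32_t elem_type)
{
    switch (elem_type)
    {
    case 1: return "FLOAT";  // "data"/"expanded" in the first model
    case 4: return "UINT16"; // "data"/"expanded" in the second model
    case 7: return "INT64";  // the "shape" input in both models
    default: return "other";
    }
}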
@@ -392,3 +392,121 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_constant_of_shape_int_ones)
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, expand_1_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/expand_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
test_case.add_input<float>(Shape{3, 1}, std::vector<float>{1.f, 2.f, 3.f});
test_case.add_input<int64_t>(Shape{3}, std::vector<int64_t>{2, 1, 6});
std::vector<float> expected_values{1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f,
3.f, 3.f, 3.f, 3.f, 3.f, 3.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,
2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f, 3.f, 3.f};
test_case.add_expected_output<float>(Shape{2, 3, 6}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, expand_2_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/expand_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
test_case.add_input<float>(Shape{3, 1}, std::vector<float>{1.f, 2.f, 3.f});
test_case.add_input<int64_t>(Shape{3}, std::vector<int64_t>{2, 3, 4});
std::vector<float> expected_values{1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f};
test_case.add_expected_output<float>(Shape{2, 3, 4}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, expand_3_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/expand_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
test_case.add_input<float>(Shape{2, 1}, std::vector<float>{4.f, 5.f});
test_case.add_input<int64_t>(Shape{2}, std::vector<int64_t>{2, 4});
std::vector<float> expected_values{4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f};
test_case.add_expected_output<float>(Shape{2, 4}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, expand_4_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/expand_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
test_case.add_input<float>(Shape{1, 3, 1}, std::vector<float>{7.f, 8.f, 9.f});
test_case.add_input<int64_t>(Shape{2}, std::vector<int64_t>{3, 1});
std::vector<float> expected_values{7.f, 8.f, 9.f};
test_case.add_expected_output<float>(Shape{1, 3, 1}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, expand_5_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/expand_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
test_case.add_input<float>(Shape{1, 4, 1}, std::vector<float>{7.f, 8.f, 9.f, 10.f});
test_case.add_input<int64_t>(Shape{2}, std::vector<int64_t>{1, 4});
std::vector<float> expected_values{
7.f, 7.f, 7.f, 7.f, 8.f, 8.f, 8.f, 8.f, 9.f, 9.f, 9.f, 9.f, 10.f, 10.f, 10.f, 10.f};
test_case.add_expected_output<float>(Shape{1, 4, 4}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, expand_6_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/expand_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
test_case.add_input<float>(Shape{1, 3, 1}, std::vector<float>{7.f, 8.f, 9.f});
test_case.add_input<int64_t>(Shape{3}, std::vector<int64_t>{3, 1, 3});
std::vector<float> expected_values{7.f, 7.f, 7.f, 8.f, 8.f, 8.f, 9.f, 9.f, 9.f,
7.f, 7.f, 7.f, 8.f, 8.f, 8.f, 9.f, 9.f, 9.f,
7.f, 7.f, 7.f, 8.f, 8.f, 8.f, 9.f, 9.f, 9.f};
test_case.add_expected_output<float>(Shape{3, 3, 3}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, expand_uint16_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/expand_uint16_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
test_case.add_input<uint16_t>(Shape{1, 2, 1}, std::vector<uint16_t>{1, 2});
test_case.add_input<int64_t>(Shape{4}, std::vector<int64_t>{2, 2, 1, 2});
std::vector<uint16_t> expected_values{1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2};
test_case.add_expected_output<uint16_t>(Shape{2, 2, 2, 2}, expected_values);
test_case.run();
}
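The output shapes asserted in these tests follow the usual ONNX/numpy broadcasting rule: right-align the input shape with the requested shape, pad the shorter one with 1s, and take the larger value in each position (dimensions must match or one of them must be 1). A small standalone sketch (illustrative only, not part of the test suite) that reproduces the expected shapes above:

// Standalone sketch: compute the Expand output shape by numpy-style
// broadcasting of the input shape against the requested shape.
// Illustrative only; not nGraph code.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

using Shape = std::vector<std::size_t>;

Shape expand_output_shape(Shape in, Shape req)
{
    // Left-pad the shorter shape with 1s so both have the same rank.
    if (in.size() < req.size())
        in.insert(in.begin(), req.size() - in.size(), 1);
    else
        req.insert(req.begin(), in.size() - req.size(), 1);

    Shape out(in.size());
    for (std::size_t i = 0; i < in.size(); ++i)
    {
        // Dimensions must be equal, or one of them must be 1.
        assert(in[i] == req[i] || in[i] == 1 || req[i] == 1);
        out[i] = std::max(in[i], req[i]);
    }
    return out;
}

int main()
{
    // Mirrors the expected output shapes of the tests above.
    assert(expand_output_shape({3, 1}, {2, 1, 6}) == Shape({2, 3, 6}));          // expand_1
    assert(expand_output_shape({2, 1}, {2, 4}) == Shape({2, 4}));                // expand_3
    assert(expand_output_shape({1, 3, 1}, {3, 1}) == Shape({1, 3, 1}));          // expand_4
    assert(expand_output_shape({1, 3, 1}, {3, 1, 3}) == Shape({3, 3, 3}));       // expand_6
    assert(expand_output_shape({1, 2, 1}, {2, 2, 1, 2}) == Shape({2, 2, 2, 2})); // expand_uint16
}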