Unverified commit 4c322687, authored by Tomasz Dołbniak, committed by GitHub

[ONNX] Initial support for dynamic shapes (#4197)

* Switch to PartialShape in onnx_importer ValueInfo

* Construct dynamic dimensions out of ONNX dimensions defined as dim_param

* Validate the PartialShape of inputs created from an ONNX model with dynamic shapes

* Validate the output shape inference for a dynamic ONNX model

* Test the execution of an ONNX model with dynamic dimensions

* Test the Ax+B with more than one batch size

* Provenance tagging adjustments - PartialShape instead of Shape

* Correct translation of ONNX shapes to nG shapes

* Test the shape of Constant produced by scalar initializers

* Review comments & more strict assertions in UT

* UT checking a dynamic rank input

* Fully dynamic input inference test

* ONNX provenance tags adjustments and back to get_shape

* Remove an obsolete space in provenance tags
Co-Authored-By: Michał Karzyński <postrational@users.noreply.github.com>

* Remove an obsolete space in provenance tests
Co-authored-by: Michał Karzyński <postrational@users.noreply.github.com>
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
parent cf21a361
......@@ -74,10 +74,10 @@ namespace ngraph
}
static std::string build_input_provenance_tag(const std::string& input_name,
const Shape& shape)
const PartialShape& shape)
{
std::stringstream tag_builder;
tag_builder << "<ONNX Input (" << input_name << ") " << shape << ">";
tag_builder << "<ONNX Input (" << input_name << ") Shape:" << shape << ">";
return tag_builder.str();
}
......
......@@ -365,6 +365,13 @@ namespace ngraph
: m_tensor_proto{&tensor}
, m_shape{std::begin(tensor.dims()), std::end(tensor.dims())}
{
if (m_shape == Shape{0})
{
// It's possible to construct a tensor in ONNX with "dims: 0" property
// Such tensor contains a scalar. This results in a Shape{0} stored in m_shape.
// In nGraph a scalar is represented with Shape{} and thus this replacement.
m_shape = Shape{};
}
}
Tensor(const Tensor&) = default;
......
......@@ -18,9 +18,8 @@
#include <onnx/onnx_pb.h>
#include "ngraph/op/constant.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/shape.hpp"
#include "default_opset.hpp"
#include "ngraph/partial_shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "node.hpp"
#include "tensor.hpp"
......@@ -56,9 +55,15 @@ namespace ngraph
{
if (value_info_proto.type().has_tensor_type())
{
for (const auto& dim : value_info_proto.type().tensor_type().shape().dim())
const auto& onnx_tensor = value_info_proto.type().tensor_type();
if (onnx_tensor.has_shape())
{
m_partial_shape = to_ng_shape(onnx_tensor.shape());
}
else
{
m_shape.emplace_back(static_cast<Shape::value_type>(dim.dim_value()));
m_partial_shape = PartialShape::dynamic();
}
}
}
......@@ -67,7 +72,7 @@ namespace ngraph
ValueInfo& operator=(ValueInfo&&) = delete;
const std::string& get_name() const { return m_value_info_proto->name(); }
const Shape& get_shape() const { return m_shape; }
const PartialShape& get_shape() const { return m_partial_shape; }
const element::Type& get_element_type() const
{
if (!m_value_info_proto->type().tensor_type().has_elem_type())
......@@ -102,9 +107,31 @@ namespace ngraph
return tensor.get_ng_constant();
}
/// \brief Converts an ONNX TensorShapeProto to an nGraph PartialShape.
///
/// \param onnx_shape shape description taken from an ONNX ValueInfoProto
/// \return a PartialShape with one Dimension per ONNX dim; statically known
///         dims become static Dimensions, all others become dynamic ones
PartialShape to_ng_shape(const onnx::TensorShapeProto& onnx_shape) const
{
    if (onnx_shape.dim_size() == 0)
    {
        return Shape{}; // empty list of dimensions denotes a scalar
    }

    std::vector<Dimension> dims;
    dims.reserve(onnx_shape.dim_size());
    for (const auto& onnx_dim : onnx_shape.dim())
    {
        if (onnx_dim.has_dim_value())
        {
            // statically known dimension size
            dims.emplace_back(onnx_dim.dim_value());
        }
        else
        {
            // Either a symbolic dimension (dim_param, e.g. "batch") or an
            // ONNX "unknown dimension" with neither field set - both denote
            // a dynamic dimension. Previously a dim with neither field was
            // silently skipped, which corrupted the rank of the result.
            dims.push_back(Dimension::dynamic());
        }
    }
    return PartialShape{dims};
}
private:
const onnx::ValueInfoProto* m_value_info_proto;
Shape m_shape;
PartialShape m_partial_shape;
};
inline std::ostream& operator<<(std::ostream& outs, const ValueInfo& info)
......
......@@ -497,6 +497,7 @@ if (NGRAPH_ONNX_IMPORT_ENABLE)
list(APPEND MULTI_TEST_SRC
onnx/onnx_import.in.cpp
onnx/onnx_import_convpool.in.cpp
onnx/onnx_import_dyn_shapes.in.cpp
onnx/onnx_import_provenance.in.cpp
onnx/onnx_import_reshape.in.cpp
onnx/onnx_import_rnn.in.cpp
......
......@@ -39,6 +39,8 @@ graph {
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
......
ir_version: 3
producer_name: "nGraph ONNX Importer"
model_version: 1
graph {
node {
name: "addition"
input: "A"
input: "B"
output: "add_out"
op_type: "Add"
}
input {
name: "A"
type {
tensor_type {
elem_type: 7
}
}
}
input {
name: "B"
type {
tensor_type {
elem_type: 7
shape {
}
}
}
}
output {
name: "add_out"
type {
tensor_type {
elem_type: 7
}
}
}
name: "simple_dyn_shapes_graph"
}
opset_import {
domain: ""
version: 7
}
ir_version: 3
producer_name: "nGraph ONNX Importer"
model_version: 1
graph {
node {
name: "multiplication"
input: "A"
input: "B"
output: "mul_out"
op_type: "Mul"
}
node {
name: "addition"
input: "mul_out"
input: "C"
output: "add_out"
op_type: "Add"
}
input {
name: "A"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_param: "batch"
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "B"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_param: "batch"
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "C"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_param: "batch"
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "add_out"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_param: "batch"
}
dim {
dim_value: 2
}
}
}
}
}
name: "simple_dyn_shapes_graph"
}
opset_import {
domain: ""
version: 7
}
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "initializer_of_A"
input: "initializer_of_B"
output: "output_of_add"
op_type: "Add"
name: "Add_node"
}
name: "test_graph"
initializer {
data_type: 7
int64_data: 1
name: "initializer_of_A"
}
initializer {
dims: 0
data_type: 7
int64_data: 2
name: "initializer_of_B"
}
output {
name: "output_of_add"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_value: 1
}
}
}
}
}
}
opset_import {
version: 9
}
......@@ -10,7 +10,7 @@ graph {
}
name: "test_graph"
initializer {
dims: 0
dims: 1
data_type: 7
int64_data: 1
name: "initializer_of_A"
......@@ -21,8 +21,6 @@ graph {
tensor_type {
elem_type: 7
shape {
dim {
}
}
}
}
......@@ -34,6 +32,7 @@ graph {
elem_type: 7
shape {
dim {
dim_value: 1
}
}
}
......
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/frontend/onnx_import/default_opset.hpp"
#include "ngraph/frontend/onnx_import/onnx.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace ngraph;
using namespace ngraph::onnx_import;
static std::string s_manifest = "${MANIFEST}";
// Validates that ONNX dynamic dimensions (dim_param) are imported as dynamic
// nGraph Dimensions and that they propagate through shape inference to the output.
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, onnx_dynamic_dims_to_ngraph_dynamic_dims)
{
// the model represents a linear function A * x + B
// where all 3 operands are model inputs (no initializers)
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/ab_plus_c.prototxt"));
const auto& graph_inputs = function->get_parameters();
EXPECT_EQ(graph_inputs.size(), 3);
// all inputs in the model have a 2D partial shape {?, 2}
// (dim 0 is declared with dim_param: "batch" in the model, dim 1 with dim_value: 2)
for (const auto& input : graph_inputs)
{
const auto& input_ps = input->get_partial_shape();
EXPECT_TRUE(input_ps.is_dynamic());
ASSERT_TRUE(input_ps.rank().is_static());
EXPECT_EQ(static_cast<size_t>(input_ps.rank()), 2);
EXPECT_TRUE(input_ps[0].is_dynamic());
ASSERT_TRUE(input_ps[1].is_static());
EXPECT_EQ(static_cast<size_t>(input_ps[1]), 2);
}
const auto& graph_outputs = function->get_results();
EXPECT_EQ(graph_outputs.size(), 1);
const auto out = *(graph_outputs.cbegin());
const auto& out_ps = out->get_output_partial_shape(0);
// shape inference is expected to carry the dynamic batch dimension to the output
ASSERT_TRUE(out_ps.rank().is_static());
EXPECT_EQ(static_cast<size_t>(out_ps.rank()), 2);
EXPECT_TRUE(out_ps[0].is_dynamic());
ASSERT_TRUE(out_ps[1].is_static());
EXPECT_EQ(static_cast<size_t>(out_ps[1]), 2);
}
// Executes the A*B+C model on a dynamic backend with several different batch
// sizes to verify that a single compiled executable handles varying input shapes.
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, ab_plus_c_inference)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/ab_plus_c.prototxt"));
// the second argument requests a backend that supports dynamic shapes
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto executable = backend->compile(function);
// the output tensor is dynamic as well - its shape is determined at call time
auto out_tensor = backend->create_dynamic_tensor(function->get_output_element_type(0),
function->get_output_partial_shape(0));
// with all 3 inputs filled with consecutive values 1, 2, 3, ... the expected
// output at position i (1-based) is i * i + i
struct ExpectedValuesGenerator
{
int64_t i = 1;
int64_t operator()()
{
const auto ret = i * i + i;
++i;
return ret;
}
};
const size_t NUM_BATCHES_TO_TEST = 5;
for (size_t batch = 1; batch <= NUM_BATCHES_TO_TEST; ++batch)
{
// each iteration feeds a different batch size into the same executable
const Shape input_shape = Shape{batch, 2};
const auto elems_in_tensor = shape_size(input_shape);
auto input_A = backend->create_tensor(element::i64, input_shape);
auto input_B = backend->create_tensor(element::i64, input_shape);
auto input_C = backend->create_tensor(element::i64, input_shape);
std::vector<int64_t> input_values(elems_in_tensor);
std::iota(input_values.begin(), input_values.end(), 1);
copy_data(input_A, input_values);
copy_data(input_B, input_values);
copy_data(input_C, input_values);
executable->call_with_validate({out_tensor}, {input_A, input_B, input_C});
const auto results = read_vector<int64_t>(out_tensor);
EXPECT_EQ(results.size(), elems_in_tensor);
std::vector<int64_t> expected_values(elems_in_tensor);
std::generate(expected_values.begin(), expected_values.end(), ExpectedValuesGenerator{});
EXPECT_TRUE(results == expected_values);
}
}
// Verifies that scalar initializers are imported as Constants with Shape{}.
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, scalar_initializers_shape_check)
{
// Initializers defined without the "dims" field, as well as those declared with
// "dims: 0", should both produce scalar Constants (Shape{}, never Shape{0}).
const auto model_path =
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/scalar_initializers.prototxt");
const auto imported_function = onnx_import::import_onnx_model(model_path);

for (const auto& op : imported_function->get_ordered_ops())
{
const auto constant = as_type_ptr<default_opset::Constant>(op);
if (constant != nullptr)
{
EXPECT_EQ(constant->get_shape(), Shape{});
}
}
}
// Checks that an ONNX input declared without a shape field is imported as a
// Parameter with a fully dynamic rank, and that the dynamic rank propagates
// to the graph's output.
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, dynamic_rank_input_check)
{
// the model contains a single Add operation that takes a fully dynamic input and a scalar
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/a_plus_b_dyn_rank.prototxt"));
const auto& graph_inputs = function->get_parameters();
ASSERT_EQ(graph_inputs.size(), 2);
// input A has no shape in the model -> dynamic rank; input B is a scalar
const auto dyn_rank_input = graph_inputs[0];
const auto scalar_input = graph_inputs[1];
EXPECT_TRUE(dyn_rank_input->get_partial_shape().rank().is_dynamic());
ASSERT_TRUE(scalar_input->get_partial_shape().is_static());
EXPECT_EQ(scalar_input->get_partial_shape().to_shape(), Shape{});
const auto& graph_outputs = function->get_results();
EXPECT_EQ(graph_outputs.size(), 1);
const auto out = *(graph_outputs.cbegin());
// adding a dynamic-rank tensor to a scalar yields a dynamic-rank result
EXPECT_TRUE(out->get_output_partial_shape(0).rank().is_dynamic());
}
// Executes the dynamic-rank model with inputs of several different ranks
// (including rank 0, i.e. a scalar) against a single compiled executable.
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, dynamic_rank_input_inference)
{
// the model contains a single Add operation that takes a fully dynamic input and a scalar
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/a_plus_b_dyn_rank.prototxt"));
// the second argument requests a backend that supports dynamic shapes
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto executable = backend->compile(function);
auto out_tensor = backend->create_dynamic_tensor(function->get_output_element_type(0),
function->get_output_partial_shape(0));
const size_t RANKS_TO_TEST = 3;
const int64_t SCALAR_INPUT_VAL = 5;
// r starts at 0 on purpose: the first iteration feeds a scalar (Shape{}) as input A
for (size_t r = 0; r <= RANKS_TO_TEST; ++r)
{
// input A has shape {2, 2, ..., 2} with r dimensions
const Shape input_a_shape = Shape(r, 2);
const auto elems_in_tensor = shape_size(input_a_shape);
auto input_A = backend->create_tensor(element::i64, input_a_shape);
auto input_B = backend->create_tensor(element::i64, Shape{});
std::vector<int64_t> input_values(elems_in_tensor);
std::iota(input_values.begin(), input_values.end(), 1);
copy_data(input_A, input_values);
copy_data<int64_t>(input_B, {SCALAR_INPUT_VAL});
executable->call_with_validate({out_tensor}, {input_A, input_B});
const auto results = read_vector<int64_t>(out_tensor);
EXPECT_EQ(results.size(), elems_in_tensor);
// expected: each element of A (1, 2, 3, ...) incremented by the scalar value
std::vector<int64_t> expected_values(elems_in_tensor);
std::iota(expected_values.begin(), expected_values.end(), SCALAR_INPUT_VAL + 1);
EXPECT_TRUE(results == expected_values);
}
}
......@@ -85,11 +85,11 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, provenance_multiple_outputs_op)
NGRAPH_TEST(onnx_${BACKEND_NAME}, provenance_tagging_constants)
{
test_provenance_tags<default_opset::Constant>("onnx/provenance_input_tags.prototxt",
"<ONNX Input (initializer_of_A) Shape{0}>");
"<ONNX Input (initializer_of_A) Shape:{1}>");
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, provenance_tagging_parameters)
{
test_provenance_tags<default_opset::Parameter>("onnx/provenance_input_tags.prototxt",
"<ONNX Input (input_B) Shape{0}>");
"<ONNX Input (input_B) Shape:{}>");
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment