Unverified Commit fdd8db66 authored by Mateusz Bencer, committed by GitHub

[ONNX] Add dynamic shapes support for pooling ops (#4285)

* Switch to PartialShape in onnx_importer ValueInfo

* Construct dynamic dimensions out of ONNX dimensions defined as dim_param

* Validate the PartialShape of inputs created from an ONNX model with dynamic shapes

* Validate the output shape inference for a dynamic ONNX model

* Test the execution of an ONNX model with dynamic dimensions

* Test Ax+B with more than one batch size

* Provenance tagging adjustments - PartialShape instead of Shape

* Correct translation of ONNX shapes to nG shapes

* Test the shape of Constant produced by scalar initializers

* Review comments & stricter assertions in UTs

* UT checking a dynamic rank input

* Fully dynamic input inference test

* first dynamic version

* modified UTs

* Added assert checks

* Added specialised methods

* first version of AvgPool

* code review remarks introduced

* Changed tests to use default BackendMode value

* Reverted unrelated changes

* first version of AvgPool

code review remarks introduced

Changed tests to use default BackendMode value

* first version of MaxPool

* Changed PoolingFactory to support dynamic shapes

* fixed Pad op

* Added UTs for global ops

* Code review remarks introduced

* code review remarks introduced

* Code refactor

* Code review remarks introduced
Co-authored-by: Tomasz Dołbniak <tomasz.dolbniak@intel.com>
Co-authored-by: Sang Ik Lee <sang.ik.lee@intel.com>
parent d605e7fa
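The gist of the change: the pooling importer now works from each input's PartialShape (asking whether the rank and each dimension are static) instead of demanding a fully static Shape. A minimal sketch of that distinction using nGraph's PartialShape API (illustrative code, not part of this commit):

    #include <iostream>
    #include "ngraph/partial_shape.hpp"

    int main()
    {
        using namespace ngraph;
        // ONNX dims declared via `dim_param` (e.g. "batch") map to dynamic
        // dimensions; dims declared via `dim_value` stay static.
        const PartialShape input{Dimension::dynamic(), 3, 5, 5};

        std::cout << input.rank().is_static() << '\n'; // 1: rank is known (4)
        std::cout << input.is_static() << '\n';        // 0: one dim is dynamic
    }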
average_pool.cpp:

@@ -16,7 +16,6 @@
 #include "average_pool.hpp"
 #include "ngraph/node.hpp"
-#include "ngraph/op/avg_pool.hpp"
 #include "utils/pooling_factory.hpp"

 namespace ngraph
@@ -29,7 +28,7 @@ namespace ngraph
 {
     NodeVector average_pool(const Node& node)
     {
-        return pooling::PoolingFactory(node).make_avg_pool();
+        return pooling::LocalPoolingFactory(node).make_avg_pool();
     }
 } // namespace set_1
...
max_pool.cpp:

@@ -31,7 +31,7 @@ namespace ngraph
 {
     NodeVector max_pool(const Node& node)
     {
-        auto max_pool = pooling::PoolingFactory(node).make_max_pool();
+        auto max_pool = pooling::LocalPoolingFactory(node).make_max_pool();
         max_pool.emplace_back(std::make_shared<NullNode>()); // Indices (optional)
         return max_pool;
     }
...
pad.cpp:

@@ -65,14 +65,19 @@ namespace ngraph
 NodeVector pad(const Node& node)
 {
     auto data = node.get_ng_inputs().at(0);
-    const Shape& data_shape = data->get_shape();
+    const auto data_rank =
+        node.get_ng_inputs().at(0)->get_output_partial_shape(0).rank();
+    CHECK_VALID_NODE(
+        node, data_rank.is_static(), "Data rank must be static for pad op");
+    const auto data_rank_value = static_cast<size_t>(data_rank);

     double value = node.get_attribute_value<double>("value", 0);
     const std::string mode =
         node.get_attribute_value<std::string>("mode", "constant");
     ngraph::op::PadMode pad_mode = get_pad_mode(mode);

-    auto paddings = convpool::get_pads(node, data_shape);
+    const auto paddings = convpool::get_pads(node, data_rank_value);
     ngraph::CoordinateDiff padding_below = paddings.first;
     ngraph::CoordinateDiff padding_above = paddings.second;
...
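Note the subtlety here: Pad passes the full data rank to get_pads, since ONNX Pad pads every axis (including N and C), whereas the parameterless get_pads overload added to convpool.cpp below subtracts 2 from the rank to obtain the spatial rank used by conv/pool ops.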
utils/convpool.cpp:

@@ -38,28 +38,41 @@ namespace ngraph
 namespace detail
 {
-    Strides get_strides_helper(const Node& node,
-                               const std::string& name,
-                               const Shape& kernel_shape)
-    {
-        return node.get_attribute_value<std::vector<std::size_t>>(
-            name, std::vector<std::size_t>(kernel_shape.size(), 1UL));
+    /// \brief Helper method used to read vector attribute
+    /// \note Default value is vector of size spatial dims filled with
+    ///       ones
+    ///
+    /// \param node      Node from which attribute is read
+    /// \param attr_name Attribute name (such as `strides`, `dilations`)
+    ///
+    /// \return Read vector attribute if available or default value
+    std::vector<std::size_t> get_attribute_value(const Node& node,
+                                                 const std::string& attr_name)
+    {
+        if (node.has_attribute(attr_name))
+        {
+            return node.get_attribute_value<std::vector<std::size_t>>(attr_name);
+        }
+        const auto data_rank =
+            node.get_ng_inputs().at(0)->get_output_partial_shape(0).rank();
+        CHECK_VALID_NODE(node,
+                         data_rank.is_static(),
+                         "If '",
+                         attr_name,
+                         "' is not provided data rank must be static");
+        const auto data_spatial_dims = static_cast<size_t>(data_rank) - 2;
+        return std::vector<std::size_t>(data_spatial_dims, 1UL);
     }
 } // namespace detail

-Strides get_strides(const Node& node, const Shape& kernel_shape)
-{
-    return detail::get_strides_helper(node, "strides", kernel_shape);
-}
-
 Strides get_strides(const Node& node)
 {
-    return get_strides(node, get_kernel_shape(node));
+    return detail::get_attribute_value(node, "strides");
 }

 Strides get_dilations(const Node& node)
 {
-    return detail::get_strides_helper(node, "dilations", get_kernel_shape(node));
+    return detail::get_attribute_value(node, "dilations");
 }

 ngraph::op::PadType get_auto_pad(const Node& node)
@@ -90,16 +103,16 @@ namespace ngraph
 }

 std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node,
-                                                   const Shape& kernel_shape)
+                                                   const size_t kernel_rank)
 {
-    CoordinateDiff pads(kernel_shape.size(), 0);
+    CoordinateDiff pads(kernel_rank, 0);
     if (node.has_attribute("pads"))
     {
         auto pads_int64 = node.get_attribute_value<std::vector<int64_t>>("pads");
         pads = CoordinateDiff{std::begin(pads_int64), std::end(pads_int64)};
     }

-    if (pads.size() == kernel_shape.size() * 2)
+    if (pads.size() == kernel_rank * 2)
     {
         return {{std::begin(pads), std::begin(pads) + pads.size() / 2},
                 {std::begin(pads) + pads.size() / 2, std::end(pads)}};
@@ -112,6 +125,18 @@ namespace ngraph
     }
 }

+std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node)
+{
+    const auto data_rank =
+        node.get_ng_inputs().at(0)->get_output_partial_shape(0).rank();
+    CHECK_VALID_NODE(node,
+                     data_rank.is_static(),
+                     "The rank of node must be static in order to calculate pads");
+    const auto data_spatial_dims = static_cast<size_t>(data_rank) - 2;
+    return get_pads(node, data_spatial_dims);
+}
+
 void calculate_auto_pads(const Shape& data_shape,
                          const Shape& filter_shape,
                          const Strides& strides,
...
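A quick illustration of the split performed by get_pads (illustrative, not from the commit): an ONNX pads attribute of [1, 1, 2, 2] follows the [x1_begin, x2_begin, ..., x1_end, x2_end, ...] layout, so the first half becomes padding_below and the second half padding_above:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        const std::vector<std::int64_t> pads{1, 1, 2, 2};

        // Same split as convpool::get_pads: halves of the flat `pads` list.
        const std::vector<std::int64_t> below(pads.begin(),
                                              pads.begin() + pads.size() / 2);
        const std::vector<std::int64_t> above(pads.begin() + pads.size() / 2,
                                              pads.end());

        std::cout << below[0] << ' ' << below[1] << '\n'; // 1 1
        std::cout << above[0] << ' ' << above[1] << '\n'; // 2 2
    }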
utils/convpool.hpp:

@@ -33,13 +33,6 @@ namespace ngraph
 /// \return The kernel Shape object representing its dimensions (height, width, depth).
 Shape get_kernel_shape(const Node& node);

-/// \brief Get number of pixels to stride operation by in each direction.
-///
-/// \param node The Node ptr representing Conv or Pool operation.
-/// \param kernel_shape The shape of the kernel which we retrieve strides for.
-/// \return The kernel Shape object representing its dimensions (height, width, depth).
-Strides get_strides(const Node& node, const Shape& kernel_shape);
-
 /// \brief Get number of pixels to stride operation by in each direction.
 ///
 /// \param node The Node ptr representing Conv or Pool operation.
@@ -59,12 +52,12 @@ namespace ngraph
 /// `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...].
 ///
 /// \param node The Node ptr representing ONNX operation.
-/// \param kernel_shape The shape of the kernel which we retrieve pads for.
+/// \param kernel_rank The rank of the kernel which we retrieve pads for.
 ///
 /// \return A pair of (padding_above, padding_below), which elements contains number of
 ///         pixels to pad in respective dimensions (height, width, depth).
 std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node,
-                                                   const Shape& kernel_shape);
+                                                   const size_t kernel_rank);

 /// \brief Get padding values for the operation described by an ONNX node.
 /// \details Values are taken from the `pads` attribute.
@@ -75,11 +68,7 @@ namespace ngraph
 ///
 /// \return A pair of (padding_above, padding_below), which elements contains number of
 ///         pixels to pad in respective dimensions (height, width, depth).
-inline std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node)
-{
-    return get_pads(node, get_kernel_shape(node));
-}
+std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node);

 ///
 /// \brief Calculate paddings with respect to auto_pad value.
...
utils/pooling_factory.cpp:

@@ -17,6 +17,7 @@
 #include <iterator>

 #include "default_opset.hpp"
+#include "exceptions.hpp"
 #include "ngraph/coordinate_diff.hpp"
 #include "utils/convpool.hpp"
 #include "utils/pooling_factory.hpp"
@@ -30,12 +31,11 @@ namespace ngraph
 PoolingFactory::PoolingFactory(const Node& node)
     : m_onnx_node{node}
     , m_inputs{node.get_ng_inputs()}
-    , m_kernel_shape{convpool::get_kernel_shape(node)}
     , m_strides{convpool::get_strides(node)}
     , m_dilations{convpool::get_dilations(node)}
     , m_auto_pad{convpool::get_auto_pad(node)}
 {
-    auto paddings = convpool::get_pads(node);
+    const auto paddings = convpool::get_pads(node);
     const CoordinateDiff& padding_above{paddings.second};
     const CoordinateDiff& padding_below{paddings.first};
     m_padding_below = Shape{std::begin(padding_below), std::end(padding_below)};
@@ -44,7 +44,7 @@ namespace ngraph
 NodeVector PoolingFactory::make_avg_pool() const
 {
-    bool count_include_pad =
+    const bool count_include_pad =
         m_onnx_node.get_attribute_value<std::int64_t>("count_include_pad", 0);
     return {std::make_shared<default_opset::AvgPool>(m_inputs.at(0),
                                                      m_strides,
@@ -67,13 +67,31 @@ namespace ngraph
                                                      m_auto_pad)};
 }

+LocalPoolingFactory::LocalPoolingFactory(const Node& node)
+    : PoolingFactory(node)
+{
+    // Kernel shape is required
+    m_kernel_shape = node.get_attribute_value<std::vector<std::size_t>>("kernel_shape");
+}
+
 GlobalPoolingFactory::GlobalPoolingFactory(const Node& node)
     : PoolingFactory(node)
 {
-    // Correct the kernel shape.
-    const Shape& data_shape{m_inputs.at(0)->get_shape()};
+    const auto data_shape = node.get_ng_inputs().at(0)->get_output_partial_shape(0);
+    const auto data_rank = data_shape.rank();
+    CHECK_VALID_NODE(
+        node, data_rank.is_static(), "Data rank must be static for global pooling ops");
+    Shape kernel_shape;
+    for (auto i = 2; i < static_cast<size_t>(data_rank); ++i)
+    {
+        CHECK_VALID_NODE(node,
+                         data_shape[i].is_static(),
+                         "All spatial dimensions must be known for global pooling ops");
+        kernel_shape.emplace_back(static_cast<size_t>(data_shape[i]));
+    }
     // Set shape to all but {N,C} axes.
-    m_kernel_shape = Shape{std::next(std::begin(data_shape), 2), std::end(data_shape)};
+    m_kernel_shape = kernel_shape;
 }
 } // namespace pooling
 } // namespace onnx_import
...
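To make the GlobalPoolingFactory logic concrete: a global pool over an {N, C, H, W} input with a dynamic batch still has a well-defined kernel, provided the spatial dims are static. A standalone sketch mirroring the constructor's loop (illustrative; assumes the nGraph headers are available):

    #include <iostream>
    #include "ngraph/partial_shape.hpp"
    #include "ngraph/shape.hpp"

    int main()
    {
        using namespace ngraph;
        // Input {?, 3, 5, 5}: dynamic batch, static spatial dims.
        const PartialShape data_shape{Dimension::dynamic(), 3, 5, 5};

        Shape kernel_shape;
        for (size_t i = 2; i < static_cast<size_t>(data_shape.rank()); ++i)
        {
            // The factory rejects inputs whose spatial dims are dynamic.
            kernel_shape.emplace_back(static_cast<size_t>(data_shape[i]));
        }
        std::cout << kernel_shape.at(0) << ' ' << kernel_shape.at(1) << '\n'; // 5 5
    }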
utils/pooling_factory.hpp:

@@ -48,7 +48,6 @@ namespace ngraph
 class PoolingFactory
 {
 public:
-    explicit PoolingFactory(const Node& node);
     virtual ~PoolingFactory() = default;

     ///
@@ -64,6 +63,8 @@ namespace ngraph
     NodeVector make_max_pool() const;

 protected:
+    explicit PoolingFactory(const Node& node);
+
     Node m_onnx_node;
     const NodeVector m_inputs;
     Shape m_kernel_shape;
@@ -75,9 +76,20 @@ namespace ngraph
 };

 ///
-/// \brief Factory class which generates sub-graphs for ONNX 'global' pooling
+/// \brief Factory class which generates sub-graphs for ONNX 'local' pooling
 /// operators.
+/// \note Kernel shape attribute is required
+class LocalPoolingFactory : public PoolingFactory
+{
+public:
+    explicit LocalPoolingFactory(const Node& node);
+    virtual ~LocalPoolingFactory() = default;
+};
+
+///
+/// \brief Factory class which generates sub-graphs for ONNX 'global' pooling
+/// operators.
+/// \note Kernel shape is calculated based on spatial dims
 class GlobalPoolingFactory : public PoolingFactory
 {
 public:
...
onnx/dynamic_shapes/average_pool_2d_dyn.prototxt (new file):

ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "AveragePool"
attribute {
name: "kernel_shape"
ints: 2
ints: 2
type: INTS
}
attribute {
name: "strides"
ints: 2
ints: 2
type: INTS
}
}
name: "compute_graph"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_param: "batch"
}
dim {
dim_param: "batch"
}
dim {
dim_param: "batch"
}
dim {
dim_param: "batch"
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
}
}
}
}
}
opset_import {
version: 7
}
onnx/dynamic_shapes/global_average_pool_dyn.prototxt (new file):

ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "GlobalAveragePool"
attribute {
name: "strides"
ints: 2
ints: 2
type: INTS
}
}
name: "compute_graph"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_param: "batch"
}
dim {
dim_param: "batch"
}
dim {
dim_value: 5
}
dim {
dim_value: 5
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
}
}
}
}
}
opset_import {
version: 7
}
onnx/dynamic_shapes/global_max_pool_dyn.prototxt (new file):

ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "GlobalMaxPool"
attribute {
name: "strides"
ints: 2
ints: 2
type: INTS
}
}
name: "compute_graph"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_param: "batch"
}
dim {
dim_param: "batch"
}
dim {
dim_value: 5
}
dim {
dim_value: 5
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
}
}
}
}
}
opset_import {
version: 7
}
onnx/dynamic_shapes/max_pool_2d_dyn.prototxt (new file):

ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "MaxPool"
attribute {
name: "kernel_shape"
ints: 2
ints: 2
type: INTS
}
attribute {
name: "strides"
ints: 2
ints: 2
type: INTS
}
attribute {
name: "pads"
ints: 1
ints: 1
ints: 1
ints: 1
type: INTS
}
}
name: "compute_graph"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_param: "batch"
}
dim {
dim_param: "batch"
}
dim {
dim_param: "batch"
}
dim {
dim_param: "batch"
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
}
}
}
}
}
opset_import {
version: 7
}
ONNX dynamic shapes test suite:

@@ -282,3 +282,83 @@ NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, model_conv_with_dynamic_batch)
     test_case.run();
 }
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, avg_pool_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/average_pool_2d_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
const Shape shape{1, 1, 4, 4};
const auto elems_in_tensor = shape_size(shape);
std::vector<float> input_values(elems_in_tensor);
std::iota(input_values.begin(), input_values.end(), 0.f);
test_case.add_input<float>(shape, input_values);
std::vector<float> expected_values{2.5f, 4.5f, 10.5f, 12.5f};
test_case.add_expected_output<float>(Shape{1, 1, 2, 2}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, max_pool_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/max_pool_2d_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
const Shape shape{1, 1, 4, 4};
const auto elems_in_tensor = shape_size(shape);
std::vector<float> input_values(elems_in_tensor);
std::iota(input_values.begin(), input_values.end(), 0.f);
test_case.add_input<float>(shape, input_values);
std::vector<float> expected_values{0.f, 2.f, 3.f, 8.f, 10.f, 11.f, 12.f, 14.f, 15.f};
test_case.add_expected_output<float>(Shape{1, 1, 3, 3}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, global_avg_pool_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/dynamic_shapes/global_average_pool_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
const Shape shape{1, 3, 5, 5};
const auto elems_in_tensor = shape_size(shape);
std::vector<float> input_values(elems_in_tensor);
std::iota(input_values.begin(), input_values.end(), 0.f);
test_case.add_input<float>(shape, input_values);
std::vector<float> expected_values{12.f, 37.f, 62.f};
test_case.add_expected_output<float>(Shape{1, 3, 1, 1}, expected_values);
test_case.run();
}
NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, global_max_pool_dyn_shape)
{
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/global_max_pool_dyn.prototxt"));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
const Shape shape{1, 3, 5, 5};
const auto elems_in_tensor = shape_size(shape);
std::vector<float> input_values(elems_in_tensor);
std::iota(input_values.begin(), input_values.end(), 0.f);
test_case.add_input<float>(shape, input_values);
std::vector<float> expected_values{24.f, 49.f, 74.f};
test_case.add_expected_output<float>(Shape{1, 3, 1, 1}, expected_values);
test_case.run();
}
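As a sanity check of the avg_pool_dyn_shape expectations (plain C++, independent of nGraph): a 2x2 kernel with stride 2 over the 4x4 iota input averages the windows {0,1,4,5}, {2,3,6,7}, {8,9,12,13} and {10,11,14,15}, giving 2.5, 4.5, 10.5 and 12.5:

    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
        std::vector<float> in(16);
        std::iota(in.begin(), in.end(), 0.f);
        for (size_t r = 0; r < 4; r += 2)
            for (size_t c = 0; c < 4; c += 2)
            {
                const float sum = in[r * 4 + c] + in[r * 4 + c + 1] +
                                  in[(r + 1) * 4 + c] + in[(r + 1) * 4 + c + 1];
                std::cout << sum / 4.f << ' '; // 2.5 4.5 10.5 12.5
            }
        std::cout << '\n';
    }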