Commit 04f212b7 authored by Tomasz Socha, committed by Scott Cyphers

[Spec] Add v1::AvgPool and v1::MaxPool (#3591)

* Add new enum: RoundingType for pooling operations

* Add v1::AvgPool op

* Add v1::MaxPool op

* Fix comments format

* Fix problem with forward declaration

* Add new unit tests and fix some bugs
parent 640295cf
......@@ -54,6 +54,13 @@ namespace ngraph
        NOTSET = EXPLICIT,
    };

    /// \brief Rounding Type used for `Pooling` operators.
    enum class RoundingType
    {
        FLOOR = 0,
        CEIL = 1,
    };

    /// \brief Specifies the algorithm to use for implicit broadcasting of a tensor
    ///        to align with another tensor
    ///
......
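For orientation (not part of the diff): a minimal sketch of what the two RoundingType values mean for the output length of a pooled axis. The helper name pooled_dim is hypothetical; it only illustrates the floor/ceil window count that the boolean ceil_mode flag controlled in the v0 ops.

    #include <cmath>
    #include <cstddef>

    // Hypothetical helper: output length of one spatial axis after pooling.
    // RoundingType::FLOOR drops a trailing partial window; RoundingType::CEIL
    // keeps it (the extra window reads into padding).
    std::size_t pooled_dim(std::size_t input, std::size_t pad_begin, std::size_t pad_end,
                           std::size_t kernel, std::size_t stride, bool ceil_mode)
    {
        double windows =
            double(input + pad_begin + pad_end - kernel) / double(stride);
        return std::size_t(ceil_mode ? std::ceil(windows) : std::floor(windows)) + 1;
    }

    // Example: input=6, kernel=3, stride=2, no padding -> windows = 1.5
    //   FLOOR: floor(1.5) + 1 = 2 output elements
    //   CEIL:  ceil(1.5)  + 1 = 3 output elements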
......@@ -15,9 +15,11 @@
//*****************************************************************************
#include "ngraph/pass/opset1_upgrade.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/gather.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/reduce_prod.hpp"
......@@ -82,18 +84,117 @@ bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
#endif
switch (get_typeid(node))
{
case OP_TYPEID::AvgPool:
{
    auto tmp = dynamic_cast<const op::v0::AvgPool*>(node.get());
    auto rounding_type = static_cast<op::RoundingType>(tmp->get_ceil_mode());
    auto exclude_pad = !tmp->get_include_padding_in_avg_computation();
    auto auto_pad = tmp->get_pad_type();
    auto pads_begin = tmp->get_padding_below();
    auto pads_end = tmp->get_padding_above();
    auto strides = tmp->get_window_movement_strides();
    auto kernel = tmp->get_window_shape();
    auto replacement_node = make_shared<op::v1::AvgPool>(node->input(0).get_source_output(),
                                                         strides,
                                                         pads_begin,
                                                         pads_end,
                                                         kernel,
                                                         exclude_pad,
                                                         rounding_type,
                                                         auto_pad);
    replace_node(node, replacement_node);
    modified = true;
    break;
}
case OP_TYPEID::AvgPoolBackprop:
{
    auto tmp = dynamic_cast<const op::v0::AvgPoolBackprop*>(node.get());
    auto exclude_pad = !tmp->get_include_padding_in_avg_computation();
    auto pads_begin = tmp->get_padding_below();
    auto pads_end = tmp->get_padding_above();
    auto strides = tmp->get_window_movement_strides();
    auto kernel = tmp->get_window_shape();
    auto replacement_node =
        make_shared<op::v1::AvgPoolBackprop>(tmp->get_forward_arg_shape(),
                                             node->input(0).get_source_output(),
                                             strides,
                                             pads_begin,
                                             pads_end,
                                             kernel,
                                             exclude_pad);
    replace_node(node, replacement_node);
    modified = true;
    break;
}
case OP_TYPEID::Gather:
{
    auto tmp = dynamic_cast<const op::v0::Gather*>(node.get());
    int64_t axis = tmp->get_axis();
    auto axis_node = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{axis});
    auto replacement_node = make_shared<op::v1::Gather>(
        node->input(0).get_source_output(), node->input(1).get_source_output(), axis_node);
    replace_node(node, replacement_node);
    modified = true;
    break;
}
case OP_TYPEID::MaxPool:
{
    auto tmp = dynamic_cast<const op::v0::MaxPool*>(node.get());
    auto rounding_type = static_cast<op::RoundingType>(tmp->get_ceil_mode());
    auto auto_pad = tmp->get_pad_type();
    auto pads_begin = tmp->get_padding_below();
    auto pads_end = tmp->get_padding_above();
    auto strides = tmp->get_window_movement_strides();
    auto kernel = tmp->get_window_shape();
    auto replacement_node = make_shared<op::v1::MaxPool>(node->input(0).get_source_output(),
                                                         strides,
                                                         pads_begin,
                                                         pads_end,
                                                         kernel,
                                                         rounding_type,
                                                         auto_pad);
    replace_node(node, replacement_node);
    modified = true;
    break;
}
case OP_TYPEID::MaxPoolBackprop:
{
    auto tmp = dynamic_cast<const op::v0::MaxPoolBackprop*>(node.get());
    auto pads_begin = tmp->get_padding_below();
    auto pads_end = tmp->get_padding_above();
    auto strides = tmp->get_window_movement_strides();
    auto kernel = tmp->get_window_shape();
    shared_ptr<Node> replacement_node;
    if (node->get_inputs().size() == 3)
    {
        replacement_node =
            make_shared<op::v1::MaxPoolBackprop>(node->input(0).get_source_output(),
                                                 node->input(1).get_source_output(),
                                                 node->input(2).get_source_output(),
                                                 strides,
                                                 pads_begin,
                                                 pads_end,
                                                 kernel);
    }
    else
    {
        replacement_node =
            make_shared<op::v1::MaxPoolBackprop>(node->input(0).get_source_output(),
                                                 node->input(1).get_source_output(),
                                                 strides,
                                                 pads_begin,
                                                 pads_end,
                                                 kernel);
    }
    replace_node(node, replacement_node);
    modified = true;
    break;
......@@ -136,14 +237,18 @@ bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
    modified = true;
    break;
}
case OP_TYPEID::Softmax:
{
    auto tmp = dynamic_cast<const op::v0::Softmax*>(node.get());
    AxisSet axes = tmp->get_axes();
    NGRAPH_CHECK(
        axes.size() == 1,
        "Unable to convert Softmax:0 to Softmax:1 with zero or more than one axis. Node: ",
        *node);
    auto replacement_node =
        make_shared<op::v1::Softmax>(node->input(0).get_source_output(), axes.to_vector()[0]);
    replace_node(node, replacement_node);
    modified = true;
    break;
......
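Two of the attribute translations above are easy to misread, so here is a small standalone sketch (assuming only the RoundingType enum added earlier in this commit). v0's boolean ceil_mode maps directly onto the enum because FLOOR = 0 and CEIL = 1 line up with false and true, and v0's include_padding_in_avg_computation flag is inverted into v1's exclude_pad:

    // Assuming the enum added above: FLOOR = 0, CEIL = 1.
    enum class RoundingType { FLOOR = 0, CEIL = 1 };

    // v0 stores rounding as a bool; the cast works because the enum
    // values line up with false/true.
    bool ceil_mode = false;
    auto rounding_type = static_cast<RoundingType>(ceil_mode); // RoundingType::FLOOR

    // v0 asks whether padding is *included* in the average; v1 asks whether
    // it is *excluded*, hence the negation in the upgrade pass.
    bool include_padding_in_avg_computation = true;
    bool exclude_pad = !include_padding_in_avg_computation; // false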
......@@ -21,7 +21,9 @@
#include "ngraph/code_writer.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/gather.hpp"
#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/sum.hpp"
......@@ -127,9 +129,6 @@ namespace ngraph
class MaxPoolWithIndices;
class Reverse;
class ReverseSequence;
class MaxPoolWithIndicesBackprop;
class Max;
class Erf;
......
......@@ -75,6 +75,7 @@ set(SRC
opset_pass/softmax_opset_pass.cpp
opset_pass/gather_opset_pass.cpp
opset_pass/pad_opset_pass.cpp
opset_pass/poolings_opset_pass.cpp
partial_shape.cpp
pass.cpp
pass_liveness.cpp
......
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/test_control.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(upgrade_pass, opset1_avgpool_pass)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
    Shape pads_begin{0, 0};
    Shape pads_end{0, 0};
    Strides strides{1, 1};
    Shape kernel_shape{3, 3};
    bool include_pad = true;
    bool ceil_mode = false;
    op::PadType pad_mode = op::PadType::EXPLICIT;

    auto avgpool_v0 = make_shared<op::v0::AvgPool>(
        arg, kernel_shape, strides, pads_begin, pads_end, include_pad, pad_mode, ceil_mode);
    auto result = make_shared<op::Result>(avgpool_v0);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset1Upgrade>();
    pass_manager.run_passes(f);

    auto avgpool_s1_result = f->get_results().at(0);
    auto node = avgpool_s1_result->input(0).get_source_output().get_node_shared_ptr();
    auto avg_pool_v1_node = static_pointer_cast<op::v1::AvgPool>(node);

    EXPECT_EQ(avg_pool_v1_node->description(), "AvgPool");
    EXPECT_EQ(avg_pool_v1_node->get_version(), 1);
    EXPECT_EQ(avg_pool_v1_node->get_pads_begin(), pads_begin);
    EXPECT_EQ(avg_pool_v1_node->get_pads_end(), pads_end);
    EXPECT_EQ(avg_pool_v1_node->get_strides(), strides);
    EXPECT_EQ(avg_pool_v1_node->get_kernel(), kernel_shape);
    EXPECT_EQ(avg_pool_v1_node->get_rounding_type(), op::RoundingType::FLOOR);
    EXPECT_EQ(avg_pool_v1_node->get_exclude_pad(), !include_pad);
    EXPECT_EQ(avg_pool_v1_node->get_auto_pad(), pad_mode);
}
TEST(upgrade_pass, opset1_maxpool_pass)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
    Shape pads_begin{0, 0};
    Shape pads_end{0, 0};
    Strides strides{1, 1};
    Shape kernel_shape{3, 3};
    bool ceil_mode = false;
    op::PadType pad_mode = op::PadType::EXPLICIT;

    auto maxpool_v0 = make_shared<op::v0::MaxPool>(
        arg, kernel_shape, strides, pads_begin, pads_end, pad_mode, ceil_mode);
    auto result = make_shared<op::Result>(maxpool_v0);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset1Upgrade>();
    pass_manager.run_passes(f);

    auto maxpool_s1_result = f->get_results().at(0);
    auto node = maxpool_s1_result->input(0).get_source_output().get_node_shared_ptr();
    auto max_pool_v1_node = static_pointer_cast<op::v1::MaxPool>(node);

    EXPECT_EQ(max_pool_v1_node->description(), "MaxPool");
    EXPECT_EQ(max_pool_v1_node->get_version(), 1);
    EXPECT_EQ(max_pool_v1_node->get_pads_begin(), pads_begin);
    EXPECT_EQ(max_pool_v1_node->get_pads_end(), pads_end);
    EXPECT_EQ(max_pool_v1_node->get_strides(), strides);
    EXPECT_EQ(max_pool_v1_node->get_kernel(), kernel_shape);
    EXPECT_EQ(max_pool_v1_node->get_rounding_type(), op::RoundingType::FLOOR);
    EXPECT_EQ(max_pool_v1_node->get_auto_pad(), pad_mode);
}
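For completeness, a sketch of constructing the v1 ops directly in the same setting as the tests above, mirroring the constructor argument order the upgrade pass relies on (data, strides, pads_begin, pads_end, kernel, then the op-specific attributes); the shapes and values here are arbitrary:

    auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
    auto avg = make_shared<op::v1::AvgPool>(data,
                                            Strides{1, 1},  // strides
                                            Shape{0, 0},    // pads_begin
                                            Shape{0, 0},    // pads_end
                                            Shape{3, 3},    // kernel
                                            true,           // exclude_pad
                                            op::RoundingType::FLOOR,
                                            op::PadType::EXPLICIT);
    auto max = make_shared<op::v1::MaxPool>(data,
                                            Strides{1, 1},
                                            Shape{0, 0},
                                            Shape{0, 0},
                                            Shape{3, 3},
                                            op::RoundingType::FLOOR,
                                            op::PadType::EXPLICIT);

Note the contrast with the v0 constructors used in the tests: v0 takes the kernel shape first and a boolean ceil_mode last, while v1 leads with strides and pads and takes the RoundingType enum instead.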