Commit 5a6cf4d0 authored by Mateusz Bencer, committed by Scott Cyphers

[SPEC] Implement ReduceSum:v1 and ReduceProd:v1 downgrade pass (#3676)

* Implemented downgrade pass

* Using Pad:v1 in onnx_importer

* Downgrade transformation doc fixed

* Added downgrade pass for reduce_prod

* Using ReduceProd:v1 in onnx_importer

* Added ReduceProd:1 downgrade support

* Clang styles applied

* Downgrade pass added for all backends

* Added downgrade pass for reduce_prod

* Using ReduceProd:v1 in onnx_importer

* Added ReduceProd:1 downgrade support

* Clang styles applied

* Apply suggestions from code review

Changed pad_opset_pass to opset_downgrade
Co-Authored-By: Tomasz Socha <tomasz.socha@intel.com>

* Fix names

* Apply suggestions from code review

Removed redundant include, renamed tests
Co-Authored-By: Tomasz Socha <tomasz.socha@intel.com>

* Changed order of passes

* Changed way of op casting

* Changed order of passes

* Changed downgrade pass order of CPU backend

* Changed downgrade pass order of CPU backend

* styles applied

* [SPEC] Fixed NormalizeL2 (#3695)

* Fixed NormalizeL2

* style applied

* Fixed axes rank checking

* removed redundant comma

* Set keep_dims by function argument

* Improved error message

* Fixed casting method

* fix styles

* styles applied

* Fixed normalize tests

* unit tests names refactor
Co-Authored-By: Michał Karzyński <postrational@users.noreply.github.com>

* Disable not supported PlaidML tests

* Added missing EOF
parent 6291f0aa
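
The gist of the change: ReduceSum:v1 and ReduceProd:v1 take the reduction axes as a second (Constant) input and carry a keep_dims flag, while Sum:v0 and Product:v0 always drop the reduced axes. The downgrade pass therefore replaces the v1 op with its v0 counterpart and, when keep_dims is true, re-inserts the reduced axes with a Reshape. A minimal C++ sketch of that mapping (the helper name is illustrative and assumes constant axes and a static output shape, exactly the preconditions the pass itself checks):

// Sketch only: mirrors the ReduceSum case of Opset0Downgrade in the diff below.
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Node> downgrade_reduce_sum(const std::shared_ptr<op::v1::ReduceSum>& v1)
{
    // v0::Sum always removes the reduced axes from the output shape.
    auto v0 = std::make_shared<op::v0::Sum>(v1->input(0).get_source_output(),
                                            v1->input(1).get_source_output());
    if (!v1->get_keep_dims())
    {
        return v0;
    }
    // keep_dims == true: re-insert every reduced axis as a dimension of size 1.
    const auto out_shape = v0->get_output_partial_shape(0).to_shape();
    Shape reshaped_shape = out_shape;
    for (const auto& axis : v1->get_reduction_axes())
    {
        reshaped_shape.insert(reshaped_shape.begin() + axis, 1);
    }
    return std::make_shared<op::Reshape>(
        v0->output(0), get_default_order(out_shape), reshaped_shape);
}

The ReduceProd case is identical with Product:v0 in place of Sum:v0; when the axes are not constant or the output shape is dynamic while keep_dims is set, the pass fails with an explicit error instead (see the NGRAPH_CHECKs in the diff).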
@@ -23,6 +23,7 @@
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/not_equal.hpp"
#include "ngraph/op/power.hpp"
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/shape.hpp"
@@ -103,9 +104,14 @@ namespace ngraph
shared_ptr<Node> l2_norm(const Output<Node>& value,
const AxisSet& reduction_axes,
float bias,
-BiasMode bias_mode)
+BiasMode bias_mode,
+bool keep_dims)
{
-shared_ptr<Node> values{make_shared<op::Sum>(value * value, reduction_axes)};
+shared_ptr<Node> values{make_shared<op::v1::ReduceSum>(
+value * value,
+make_shared<op::Constant>(
+element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
+keep_dims)};
shared_ptr<Node> bias_node{
op::Constant::create(values->get_element_type(),
...
@@ -68,13 +68,15 @@ namespace ngraph
/// \param[in] reduction_axes The axes along which we calculate norm.
/// \param[in] bias The bias combined with calculated sum.
/// \param[in] bias_mode The method of bias application.
/// \param[in] keep_dims The flag indicates if axes will be removed or kept.
///
/// \return L-2 norm of value.
///
std::shared_ptr<Node> l2_norm(const Output<Node>& value,
const AxisSet& reduction_axes,
float bias = 0.f,
-BiasMode bias_mode = BiasMode::ADD);
+BiasMode bias_mode = BiasMode::ADD,
+bool keep_dims = false);
/// \brief Creates node which calculates L-p norm on input tensor.
///
...
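
For orientation, a hypothetical call to the extended builder (the parameter node and shapes here are made up for illustration, not taken from this commit):

// Hypothetical usage of builder::l2_norm with the new keep_dims argument.
auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto norm = builder::l2_norm(data, AxisSet{1}, 1e-6f, builder::BiasMode::ADD, true);
// With keep_dims = true the reduced axis stays as size 1 (result shape {2, 1}),
// so the norm can be broadcast-divided against the original {2, 4} data, which
// is what NormalizeL2::decompose_op() relies on further down in this commit.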
@@ -28,7 +28,8 @@
#include "ngraph/op/max.hpp"
#include "ngraph/op/min.hpp"
#include "ngraph/op/multiply.hpp"
-#include "ngraph/op/product.hpp"
+#include "ngraph/op/reduce_prod.hpp"
+#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/util/broadcasting.hpp"
#include "utils/reduction.hpp"
@@ -128,7 +129,8 @@ namespace ngraph
std::placeholders::_1,
std::placeholders::_2,
0.f,
-ngraph::builder::BiasMode::ADD);
+ngraph::builder::BiasMode::ADD,
+false);
return {reduction::make_ng_reduction_op(
node, node.get_ng_inputs().at(0), l2_norm_reduction)};
}
@@ -208,9 +210,10 @@ namespace ngraph
return {reduction::make_ng_reduction_op(
node,
node.get_ng_inputs().at(0),
-std::make_shared<ngraph::op::Product,
+std::make_shared<ngraph::op::v1::ReduceProd,
const std::shared_ptr<ngraph::Node>&,
-const ngraph::AxisSet&>)};
+const std::shared_ptr<ngraph::Node>&,
+bool>)};
}
/// \brief Compute the sum of the input tensor's elements along the provided
@@ -230,9 +233,10 @@ namespace ngraph
return {reduction::make_ng_reduction_op(
node,
node.get_ng_inputs().at(0),
-std::make_shared<ngraph::op::Sum,
+std::make_shared<ngraph::op::v1::ReduceSum,
const std::shared_ptr<ngraph::Node>&,
-const ngraph::AxisSet&>)};
+const std::shared_ptr<ngraph::Node>&,
+bool>)};
}
/// \brief Compute the sum square of the input tensor's element along the
...
@@ -18,6 +18,7 @@
#include <vector>
#include "exceptions.hpp"
#include "ngraph/op/constant.hpp"
#include "reduction.hpp"
#include "utils/common.hpp"
@@ -80,6 +81,31 @@ namespace ngraph
Shape{output_shape});
}
std::shared_ptr<ngraph::Node>
make_ng_reduction_op(const Node& node,
const std::shared_ptr<ngraph::Node>& ng_input,
RuntimeReductionFunction reduction_function)
{
auto data_shape = ng_input->get_shape();
auto reduction_axes = detail::get_reduction_axes(node);
ASSERT_VALID_ARGUMENT(node, reduction_axes.size() <= data_shape.size())
<< "provided reduction axes count (" << reduction_axes.size()
<< ") is larger than input tensor rank (" << data_shape.size() << ")";
std::int64_t keepdims = node.get_attribute_value<std::int64_t>("keepdims", 1);
std::shared_ptr<ngraph::Node> op_node = reduction_function(
ng_input,
std::make_shared<ngraph::op::Constant>(element::i64,
ngraph::Shape{reduction_axes.size()},
reduction_axes.to_vector()),
static_cast<bool>(keepdims));
return op_node;
}
} // namespace reduction
} // namespace onnx_import
} // namespace ngraph
@@ -41,6 +41,9 @@ namespace ngraph
} // namespace detail
using RuntimeReductionFunction = std::function<std::shared_ptr<ngraph::Node>(
const std::shared_ptr<ngraph::Node>&, const std::shared_ptr<ngraph::Node>&, bool)>;
using ReductionFunction = std::function<std::shared_ptr<ngraph::Node>(
const std::shared_ptr<ngraph::Node>&, const ngraph::AxisSet&)>;
@@ -59,6 +62,22 @@ namespace ngraph
const std::shared_ptr<ngraph::Node>& ng_input,
ReductionFunction reduction_function);
///
/// \brief Create an nGraph version of an ONNX reduction operation.
///
/// \param[in] node The node representing incoming ONNX operation.
/// \param[in] ng_input The input (nGraph) Tensor.
/// \param[in] reduction_function The reduction function defining arithmetic dynamic
/// reduction
/// operation (e.g. ReduceProd, ReduceSum).
///
/// \return nGraph node equivalent of the ONNX operation.
///
std::shared_ptr<ngraph::Node>
make_ng_reduction_op(const Node& node,
const std::shared_ptr<ngraph::Node>& ng_input,
RuntimeReductionFunction reduction_function);
template <class IndexReduction>
std::shared_ptr<ngraph::Node> make_ng_index_reduction_op(const Node& node)
{
...
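
A note on how the new overload is meant to be fed: reduce.cpp passes the address of an explicit std::make_shared instantiation as the RuntimeReductionFunction. An equivalent lambda (illustrative only, not part of the commit) makes the expected call shape explicit:

// Sketch: a RuntimeReductionFunction building ReduceSum:v1 from (data, axes, keep_dims).
reduction::RuntimeReductionFunction reduce_sum_fn =
    [](const std::shared_ptr<ngraph::Node>& data,
       const std::shared_ptr<ngraph::Node>& axes,
       bool keep_dims) -> std::shared_ptr<ngraph::Node> {
    return std::make_shared<ngraph::op::v1::ReduceSum>(data, axes, keep_dims);
};

make_ng_reduction_op then builds the axes Constant from the ONNX node's reduction axes and forwards its "keepdims" attribute (default 1) as the bool argument, as shown in reduction.cpp above.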
@@ -53,8 +53,8 @@ void op::NormalizeL2::pre_validate_and_infer_types()
if (axes_rank.is_static())
{
NODE_VALIDATION_CHECK(this,
-static_cast<size_t>(axes_rank) == 1,
-"Input axes must have rank equals 1 (axes rank: ",
+static_cast<size_t>(axes_rank) <= 1,
+"Input axes must be scalar or have rank equal to 1 (axes rank: ",
axes_rank,
").");
@@ -97,7 +97,7 @@ NodeVector op::NormalizeL2::decompose_op() const
// Calculate l2 norm across axes determined by axes input
auto builder_bias_mode =
(m_eps_mode == EpsMode::MAX) ? builder::BiasMode::MAX : builder::BiasMode::ADD;
-Output<Node> norm = builder::l2_norm(data, reduction_axes, m_eps, builder_bias_mode);
+Output<Node> norm = builder::l2_norm(data, reduction_axes, m_eps, builder_bias_mode, true);
data = make_shared<op::Divide>(data, norm, AutoBroadcastSpec(AutoBroadcastType::NUMPY));
...
@@ -15,10 +15,16 @@
//*****************************************************************************
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/reduce_prod.hpp"
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/op/sum.hpp"
using namespace std;
using namespace ngraph;
@@ -79,7 +85,7 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
{
case OP_TYPEID::Pad:
{
-auto tmp = dynamic_cast<const op::v1::Pad*>(node.get());
+auto tmp = as_type_ptr<op::v1::Pad>(node);
const auto pad_arg = node->input(0).get_source_output();
const auto pad_value = node->input(3).get_source_output();
auto replacement_node = make_shared<op::v0::Pad>(
@@ -89,6 +95,40 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
modified = true;
break;
}
case OP_TYPEID::Product:
{
auto tmp = as_type_ptr<op::v1::ReduceProd>(node);
auto replacement_node = make_shared<op::v0::Product>(node->input(0).get_source_output(),
node->input(1).get_source_output());
if (tmp->get_keep_dims())
{
NGRAPH_CHECK(tmp->reduction_axes_constant(),
"Unable to convert ReduceProd:v1 to Product:v0 "
"if reduction axes are not constant (for keep_dims=true). Node: ",
*node);
auto output_pshape = replacement_node->get_output_partial_shape(0);
NGRAPH_CHECK(output_pshape.is_static(),
"Unable to convert ReduceProd:v1 to Product:v0 "
"if output shape is dynamic (for keep_dims=true). Node: ",
*node);
const auto output_shape = output_pshape.to_shape();
auto reshaped_output_shape = output_shape;
for (const auto& axis : tmp->get_reduction_axes())
{
reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1);
}
auto reshaped_product = make_shared<op::Reshape>(replacement_node->output(0),
get_default_order(output_shape),
reshaped_output_shape);
replace_node(node, reshaped_product);
}
else
{
replace_node(node, replacement_node);
}
modified = true;
break;
}
case OP_TYPEID::Reverse:
{
auto tmp = as_type_ptr<op::v1::Reverse>(node);
@@ -121,11 +161,44 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
modified = true;
break;
}
case OP_TYPEID::Sum:
{
auto tmp = as_type_ptr<op::v1::ReduceSum>(node);
auto replacement_node = make_shared<op::v0::Sum>(node->input(0).get_source_output(),
node->input(1).get_source_output());
if (tmp->get_keep_dims())
{
NGRAPH_CHECK(tmp->reduction_axes_constant(),
"Unable to convert ReduceSum:v1 to Sum:v0 "
"if reduction axes are not constant (for keep_dims=true). Node: ",
*node);
auto output_pshape = replacement_node->get_output_partial_shape(0);
NGRAPH_CHECK(output_pshape.is_static(),
"Unable to convert ReduceSum:v1 to Sum:v0 "
"if output shape is dynamic (for keep_dims=true). Node: ",
*node);
const auto output_shape = output_pshape.to_shape();
auto reshaped_output_shape = output_shape;
for (const auto& axis : tmp->get_reduction_axes())
{
reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1);
}
auto reshaped_product = make_shared<op::Reshape>(replacement_node->output(0),
get_default_order(output_shape),
reshaped_output_shape);
replace_node(node, reshaped_product);
}
else
{
replace_node(node, replacement_node);
}
modified = true;
break;
}
default: break;
}
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
return modified;
}
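
A concrete shape trace for the keep_dims branch above (the same shapes are exercised by the downgrade tests added later in this commit):

// data: Shape{3, 4, 5}, reduction axes {1, 2}
// v0::Sum / v0::Product output:             Shape{3}
// after Reshape re-inserting axes 1 and 2:  Shape{3, 1, 1}   (keep_dims = true)
// with keep_dims = false, the v0 op's Shape{3} output is used directly.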
@@ -298,3 +298,7 @@ dyn_replace_slice
# bf16 test cases not supported
convert_float32_bf16
convert_bf16_float32
# infinitive values are returned for below cases
normalize_across_c_2x2_shape
normalize_across_c_2x4_shape
@@ -734,6 +734,61 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_123axes_5d)
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
}
NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x2_shape)
{
Shape data_shape{2, 2};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
const auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{1});
float eps{1e-6f};
auto eps_mode = op::EpsMode::ADD;
auto normalize = make_shared<op::NormalizeL2>(data, axes, eps, eps_mode);
auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
vector<float> input_data(shape_size(data_shape));
iota(begin(input_data), end(input_data), 1);
test_case.add_input<float>(input_data);
test_case.add_expected_output<float>(data_shape,
{0.44721353f, 0.89442706f, 0.60000002f, 0.80000001f});
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
}
NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x4_shape)
{
Shape data_shape{2, 4};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
const auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{1});
float eps{1e-6f};
auto eps_mode = op::EpsMode::ADD;
auto normalize = make_shared<op::NormalizeL2>(data, axes, eps, eps_mode);
auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
vector<float> input_data(shape_size(data_shape));
iota(begin(input_data), end(input_data), 1);
test_case.add_input<float>(input_data);
test_case.add_expected_output<float>(data_shape,
{0.18257418f,
0.36514837f,
0.54772252f,
0.73029673f,
0.37904903f,
0.45485884f,
0.53066862f,
0.60647845f});
test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
}
NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d_max_bias)
{
Shape data_shape{1, 2, 3, 4};
...
@@ -19,13 +19,14 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
-TEST(serialize, opset1_product_upgrade)
+TEST(opset_transform, opset1_product_upgrade_pass)
{
const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const AxisSet reduction_axes{1, 2};
@@ -46,3 +47,124 @@ TEST(serialize, opset1_product_upgrade)
EXPECT_EQ(reduce_prod_v1->get_version(), 1);
EXPECT_EQ(reduce_prod_v1->get_keep_dims(), false);
}
TEST(opset_transform, opset0_reduce_prod_downgrade_pass)
{
const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 1});
const auto product_v1 = make_shared<op::v1::ReduceProd>(data, axes, true);
const auto result = make_shared<op::Result>(product_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(f);
const auto reshape_replacement_node =
f->get_result()->input(0).get_source_output().get_node_shared_ptr();
const auto reshape = static_pointer_cast<op::Reshape>(reshape_replacement_node);
const auto product_replace_node =
reshape_replacement_node->input(0).get_source_output().get_node_shared_ptr();
const auto product_v0 = static_pointer_cast<op::v0::Product>(product_replace_node);
EXPECT_EQ(reshape->description(), "Reshape");
EXPECT_EQ(reshape->get_version(), 0);
EXPECT_EQ(product_v0->description(), "Product");
EXPECT_EQ(product_v0->get_version(), 0);
}
TEST(opset_transform, opset0_reduce_prod_downgrade_pass_axes_not_constant)
{
const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const auto axes = make_shared<op::Parameter>(element::f32, Shape{1});
const auto product_v1 = make_shared<op::v1::ReduceProd>(data, axes, true);
const auto result = make_shared<op::Result>(product_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data, axes});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
try
{
pass_manager.run_passes(f);
FAIL() << "Exception after Opset0Downgrade pass was not thrown.";
}
catch (const ngraph_error& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Unable to convert ReduceProd:v1 to Product:v0 "
"if reduction axes are not constant (for keep_dims=true)"));
}
catch (...)
{
FAIL() << "ReduceProd pass failed for unexpected reason";
}
}
TEST(opset_transform, opset0_reduce_prod_downgrade_pass_output_not_static)
{
const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 1});
const auto product_v1 = make_shared<op::v1::ReduceProd>(data, axes, true);
const auto result = make_shared<op::Result>(product_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
try
{
pass_manager.run_passes(f);
FAIL() << "Exception after Opset0Downgrade pass was not thrown.";
}
catch (const ngraph_error& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Unable to convert ReduceProd:v1 to Product:v0 "
"if output shape is dynamic (for keep_dims=true)"));
}
catch (...)
{
FAIL() << "ReduceProd pass failed for unexpected reason";
}
}
TEST(opset_transform, opset0_reduce_prod_downgrade_pass_out_shape_if_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = true;
auto reduce_prod_v1 = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);
const auto result = make_shared<op::Result>(reduce_prod_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(f);
const auto replacement_node =
f->get_result()->input(0).get_source_output().get_node_shared_ptr();
ASSERT_TRUE(replacement_node->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(opset_transform, opset0_reduce_prod_downgrade_pass_out_shape_if_not_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = false;
auto reduce_prod_v1 = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);
const auto result = make_shared<op::Result>(reduce_prod_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(f);
const auto replacement_node =
f->get_result()->input(0).get_source_output().get_node_shared_ptr();
ASSERT_TRUE(replacement_node->get_output_partial_shape(0).compatible(PartialShape{3}));
}
@@ -19,13 +19,14 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
-TEST(serialize, opset1_sum_upgrade)
+TEST(opset_transform, opset1_reduce_sum_upgrade_pass)
{
const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const AxisSet reduction_axes{1, 2};
@@ -40,9 +41,130 @@ TEST(serialize, opset1_sum_upgrade)
const auto pass_replacement_node =
f->get_result()->input(0).get_source_output().get_node_shared_ptr();
-const auto reduce_sum_v1 = static_pointer_cast<op::v1::ReduceProd>(pass_replacement_node);
+const auto reduce_sum_v1 = static_pointer_cast<op::v1::ReduceSum>(pass_replacement_node);
EXPECT_EQ(reduce_sum_v1->description(), "Sum");
EXPECT_EQ(reduce_sum_v1->get_version(), 1);
EXPECT_EQ(reduce_sum_v1->get_keep_dims(), false);
}
TEST(opset_transform, opset0_reduce_sum_downgrade_pass)
{
const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 1});
const auto sum_v1 = make_shared<op::v1::ReduceSum>(data, axes, true);
const auto result = make_shared<op::Result>(sum_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(f);
const auto reshape_replacement_node =
f->get_result()->input(0).get_source_output().get_node_shared_ptr();
const auto reshape = static_pointer_cast<op::Reshape>(reshape_replacement_node);
const auto sum_replace_node =
reshape_replacement_node->input(0).get_source_output().get_node_shared_ptr();
const auto sum_v0 = static_pointer_cast<op::v0::Sum>(sum_replace_node);
EXPECT_EQ(reshape->description(), "Reshape");
EXPECT_EQ(reshape->get_version(), 0);
EXPECT_EQ(sum_v0->description(), "Sum");
EXPECT_EQ(sum_v0->get_version(), 0);
}
TEST(opset_transform, opset0_reduce_sum_downgrade_pass_not_constant_axes)
{
const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
const auto axes = make_shared<op::Parameter>(element::f32, Shape{1});
const auto sum_v1 = make_shared<op::v1::ReduceSum>(data, axes, true);
const auto result = make_shared<op::Result>(sum_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data, axes});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
try
{
pass_manager.run_passes(f);
FAIL() << "Exception after Opset0Downgrade pass was not thrown.";
}
catch (const ngraph_error& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Unable to convert ReduceSum:v1 to Sum:v0 "
"if reduction axes are not constant (for keep_dims=true)"));
}
catch (...)
{
FAIL() << "ReduceSum pass failed for unexpected reason";
}
}
TEST(opset_transform, opset0_reduce_sum_downgrade_pass_output_not_static)
{
const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 1});
const auto sum_v1 = make_shared<op::v1::ReduceSum>(data, axes, true);
const auto result = make_shared<op::Result>(sum_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
try
{
pass_manager.run_passes(f);
FAIL() << "Exception after Opset0Downgrade pass was not thrown.";
}
catch (const ngraph_error& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Unable to convert ReduceSum:v1 to Sum:v0 "
"if output shape is dynamic (for keep_dims=true)"));
}
catch (...)
{
FAIL() << "ReduceSum pass failed for unexpected reason";
}
}
TEST(opset_transform, opset0_reduce_sum_downgrade_pass_out_shape_if_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = true;
auto reduce_sum_v1 = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
const auto result = make_shared<op::Result>(reduce_sum_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(f);
const auto replacement_node =
f->get_result()->input(0).get_source_output().get_node_shared_ptr();
ASSERT_TRUE(replacement_node->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
}
TEST(opset_transform, opset0_reduce_sum_downgrade_pass_out_shape_if_not_keep_dims)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto keep_dims = false;
auto reduce_sum_v1 = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
const auto result = make_shared<op::Result>(reduce_sum_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(f);
const auto replacement_node =
f->get_result()->input(0).get_source_output().get_node_shared_ptr();
ASSERT_TRUE(replacement_node->get_output_partial_shape(0).compatible(PartialShape{3}));
}
@@ -20,9 +20,8 @@ import numpy as np
input = np.arange(1, 25, 1).reshape(1, 2, 3, 4).astype(np.float32)
eps = np.array([1e-6]).astype(np.float32)
# across chw axes
-norm = np.sqrt(np.maximum(np.sum(np.power(input, 2), axis=(1, 2, 3)), eps))
+norm = np.sqrt(np.sum(np.power(input, 2), axis=(1), keepdims=True) + eps)
result = input/norm
for elem in np.nditer(result):
    print(str(round(elem, 8)) + 'f, ')
@@ -61,7 +61,8 @@ TEST(type_prop, normalize_invalid_axes_rank)
}
catch (const NodeValidationFailure& error)
{
-EXPECT_HAS_SUBSTRING(error.what(), std::string("Input axes must have rank equals 1"));
+EXPECT_HAS_SUBSTRING(error.what(),
+std::string("Input axes must be scalar or have rank equal to 1"));
}
catch (...)
{
...