Commit 81f33056 authored by Nagy Mostafa, committed by Robert Kimball

Dynamic Padding Implementation (#2641)

* Initial DynPad implementation

* Initial DynPad implementation

* Fixed DynPad validation. Added Unit-test

* Nits and white-space fixes

* - PR feedback.
- Added padding rank check tests

* Minor comment fix

* Fix merge error
parent 6716068c
......@@ -130,6 +130,8 @@ set (SRC
op/exp.hpp
op/experimental/dyn_broadcast.cpp
op/experimental/dyn_broadcast.hpp
op/experimental/dyn_pad.cpp
op/experimental/dyn_pad.hpp
op/experimental/generate_mask.cpp
op/experimental/generate_mask.hpp
op/experimental/quantized_avg_pool.cpp
......
......@@ -88,6 +88,7 @@
#include "ngraph/op/exp.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp"
#include "ngraph/op/experimental/shape_of.hpp"
#include "ngraph/op/experimental/transpose.hpp"
#include "ngraph/op/floor.hpp"
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/experimental/dyn_pad.hpp"
using namespace std;
using namespace ngraph;
op::DynPad::DynPad(const std::shared_ptr<Node>& arg,
const std::shared_ptr<Node>& padding_below,
const std::shared_ptr<Node>& padding_above,
const std::shared_ptr<Node>& padding_value)
: Op("DynPad", check_single_output_args({arg, padding_below, padding_above, padding_value}))
{
constructor_validate_and_infer_types();
}
void op::DynPad::validate_and_infer_types()
{
auto arg_t = get_input_element_type(0);
auto padding_value_t = get_input_element_type(3);
NODE_VALIDATION_CHECK(
this, arg_t.compatible(padding_value_t), "Padding value and arg type mismatch");
    // The padding inputs should have an integer element type. For now we only allow i64.
    // TODO: potentially make the type more flexible to include other integer types
    auto padding_below_et = get_input_element_type(1);
    NODE_VALIDATION_CHECK(this,
                          padding_below_et.compatible(element::Type_t::i64),
                          "DynPad padding below must have element type i64, but has ",
                          padding_below_et);
    auto padding_above_et = get_input_element_type(2);
    NODE_VALIDATION_CHECK(this,
                          padding_above_et.compatible(element::Type_t::i64),
                          "DynPad padding above must have element type i64, but has ",
                          padding_above_et);
// padding_value is of scalar shape or rank is unknown
auto padding_value_rank = get_input_partial_shape(3).rank();
    NODE_VALIDATION_CHECK(this,
                          padding_value_rank.compatible(0),
                          "DynPad padding value is not a scalar (rank 0), but has rank = ",
                          padding_value_rank);
auto arg_shape = get_input_partial_shape(0);
auto arg_rank = arg_shape.rank();
auto pd_bl_shape = get_input_partial_shape(1);
auto pd_bl_rank = pd_bl_shape.rank();
auto pd_ab_shape = get_input_partial_shape(2);
auto pd_ab_rank = pd_ab_shape.rank();
auto output_rank = Rank::dynamic();
NODE_VALIDATION_CHECK(
this, pd_bl_rank.compatible(1), "Shape of padding below must be of rank 1");
NODE_VALIDATION_CHECK(
this, pd_ab_rank.compatible(1), "Shape of padding above must be of rank 1");
if (arg_rank.is_static())
{
        // The padding shapes should be of the form {arg_rank} or dynamic
NODE_VALIDATION_CHECK(this,
pd_bl_shape.compatible(PartialShape{arg_rank}),
"Arg and padding below ranks mismatch");
NODE_VALIDATION_CHECK(this,
pd_ab_shape.compatible(PartialShape{arg_rank}),
"Arg and padding above ranks mismatch");
output_rank = arg_rank;
}
else
{
// arg's rank is dynamic
        // Check the padding shapes. We already know that each is either ?, {?}, or {x};
        // they must be equal if both are of the form {x}.
NODE_VALIDATION_CHECK(
this, pd_bl_shape.compatible(pd_ab_shape), "Padding below and above ranks mismatch");
output_rank = pd_bl_shape.is_static() ? pd_bl_shape[0] : pd_ab_shape[0];
}
auto out_shape = PartialShape::dynamic(output_rank);
set_output_type(0, arg_t, out_shape);
}
shared_ptr<Node> op::DynPad::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<DynPad>(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3));
}
// TODO: This function is not implemented!
void op::DynPad::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
{
throw ngraph_error("generate_adjoints not implemented for DynPad");
}
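For reference, here is a minimal standalone sketch of the rank-inference rule that validate_and_infer_types implements above. The helper name infer_dyn_pad_output_rank and the use of -1 to stand for a dynamic rank/length are assumptions for illustration only; they are not part of this commit.

#include <cstdio>

// Mirrors op::DynPad::validate_and_infer_types: a static arg rank fixes the output
// rank; otherwise the length of a statically-known padding vector is used, and the
// result stays dynamic (-1) if neither padding length is known.
static int infer_dyn_pad_output_rank(int arg_rank, int pad_below_len, int pad_above_len)
{
    if (arg_rank >= 0)
    {
        return arg_rank;
    }
    return pad_below_len >= 0 ? pad_below_len : pad_above_len;
}

int main()
{
    std::printf("%d\n", infer_dyn_pad_output_rank(4, -1, -1));  // static arg rank 4 -> 4
    std::printf("%d\n", infer_dyn_pad_output_rank(-1, 3, 3));   // dynamic arg, padding length 3 -> 3
    std::printf("%d\n", infer_dyn_pad_output_rank(-1, -1, -1)); // everything dynamic -> -1 (dynamic)
    return 0;
}

The output shape itself is always reported as PartialShape::dynamic(output_rank), since the concrete extents depend on the padding widths supplied at runtime.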
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
        /// \brief Generic padding operation that takes the padding-below and padding-above widths as dynamic shapes.
        ///        It is similar to the existing Pad operation, except that the padding widths are supplied as inputs rather than as static attributes.
class DynPad : public Op
{
public:
/// \brief Perform dynamic padding of a tensor
///
/// \param arg The node producing input tensor to be padded.
/// \param padding_below The node producing the padding-below widths.
/// \param padding_above The node producing the padding-above widths.
/// \param padding_value The value to be used for padding. Must be scalar.
DynPad(const std::shared_ptr<Node>& arg,
const std::shared_ptr<Node>& padding_below,
const std::shared_ptr<Node>& padding_above,
const std::shared_ptr<Node>& padding_value);
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
};
}
}
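Below is a minimal construction sketch for the new op, assuming only the headers introduced above. The graph, shapes, and the helper name make_dyn_pad_graph are illustrative and not part of this commit.

#include "ngraph/ngraph.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp"

using namespace ngraph;

// Pads a rank-3 f32 tensor with padding widths that are only known at runtime.
std::shared_ptr<Function> make_dyn_pad_graph()
{
    auto arg = std::make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
    auto pad_below = std::make_shared<op::Parameter>(element::i64, Shape{3});
    auto pad_above = std::make_shared<op::Parameter>(element::i64, Shape{3});
    auto pad_value = std::make_shared<op::Parameter>(element::f32, Shape{});

    auto dyn_pad = std::make_shared<op::DynPad>(arg, pad_below, pad_above, pad_value);

    // At construction time only the output rank (3) is known; the concrete output
    // shape depends on the padding widths fed to pad_below and pad_above at runtime.
    return std::make_shared<Function>(NodeVector{dyn_pad},
                                      ParameterVector{arg, pad_below, pad_above, pad_value});
}

The type_prop tests added at the end of this change exercise the same construction path with various combinations of static and dynamic shapes.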
......@@ -81,6 +81,7 @@ NGRAPH_OP(Dequantize, ngraph::op)
NGRAPH_OP(Divide, ngraph::op)
NGRAPH_OP(DynBroadcast, ngraph::op)
NGRAPH_OP(Dot, ngraph::op)
NGRAPH_OP(DynPad, ngraph::op)
NGRAPH_OP(Equal, ngraph::op)
NGRAPH_OP(Exp, ngraph::op)
NGRAPH_OP(Floor, ngraph::op)
......
......@@ -221,6 +221,7 @@ bool runtime::gpu::GPU_Backend::is_supported(const Node& op) const
"All",
"Any",
"AllReduce",
"DynPad"
"SelectAndScatter",
"StopGradient",
"EmbeddingLookup",
......
......@@ -58,6 +58,7 @@
#include "ngraph/op/equal.hpp"
#include "ngraph/op/exp.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp"
#include "ngraph/op/experimental/generate_mask.hpp"
#include "ngraph/op/experimental/quantized_avg_pool.hpp"
#include "ngraph/op/experimental/quantized_conv.hpp"
......@@ -1388,6 +1389,11 @@ std::string runtime::gpu::GPU_Emitter::emit_DynBroadcast(EMIT_ARGS)
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_DynPad(EMIT_ARGS)
{
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
std::string runtime::gpu::GPU_Emitter::emit_Transpose(EMIT_ARGS)
{
throw unsupported_op("Unsupported op '" + node->description() + "'");
......
......@@ -2014,6 +2014,7 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::EmbeddingLookup:
case OP_TYPEID::DynBroadcast:
case OP_TYPEID::Passthrough:
case OP_TYPEID::DynPad:
{
throw unsupported_op("Unsupported op '" + op->description() +
"' in IntelGPU back end.");
......
......@@ -37,6 +37,7 @@
#include "ngraph/op/embedding_lookup.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp"
#include "ngraph/op/experimental/generate_mask.hpp"
#include "ngraph/op/experimental/shape_of.hpp"
#include "ngraph/op/get_output_element.hpp"
......@@ -1251,6 +1252,7 @@ private:
}
case OP_TYPEID::DynBroadcast:
case OP_TYPEID::Transpose:
case OP_TYPEID::DynPad:
default: throw unsupported_op("Unsupported op '" + node.description() + "'");
#pragma GCC diagnostic pop
}
......
......@@ -48,6 +48,7 @@
#include "ngraph/op/equal.hpp"
#include "ngraph/op/exp.hpp"
#include "ngraph/op/experimental/dyn_broadcast.hpp"
#include "ngraph/op/experimental/dyn_pad.hpp"
#include "ngraph/op/experimental/generate_mask.hpp"
#include "ngraph/op/experimental/quantized_avg_pool.hpp"
#include "ngraph/op/experimental/quantized_conv.hpp"
......@@ -753,6 +754,11 @@ static shared_ptr<ngraph::Function>
node = make_shared<op::DynBroadcast>(args[0], args[1], args[2]);
break;
}
case OP_TYPEID::DynPad:
{
node = make_shared<op::DynPad>(args[0], args[1], args[2], args[3]);
break;
}
case OP_TYPEID::EmbeddingLookup:
{
node = make_shared<op::EmbeddingLookup>(args[0], args[1]);
......@@ -1508,6 +1514,8 @@ static json write(const Node& n, bool binary_constant_data)
}
case OP_TYPEID::DynBroadcast: { break;
}
case OP_TYPEID::DynPad: { break;
}
case OP_TYPEID::EmbeddingLookup: { break;
}
case OP_TYPEID::Equal: { break;
......
......@@ -12492,3 +12492,163 @@ TEST(type_prop, transpose_input_order_et_wrong)
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, dyn_pad_pad_value_test)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
auto pad_b = make_shared<op::Parameter>(element::i64, Shape{3});
auto pad_a = make_shared<op::Parameter>(element::i64, Shape{3});
// padding value matches tensor data-type
try
{
auto pad_v = make_shared<op::Parameter>(element::i32, Shape{});
        auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
        FAIL() << "Padding value type mismatch not detected";
    }
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Padding value and arg type mismatch");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
// padding value is scalar
try
{
auto pad_v = make_shared<op::Parameter>(element::f32, Shape{3});
        auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
        FAIL() << "Non-scalar padding value not detected";
    }
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "DynPad arg is not scalar");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, dyn_pad_wrong_ranks)
{
auto pad_v = make_shared<op::Parameter>(element::f32, Shape{});
try
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
auto pad_b = make_shared<op::Parameter>(element::i64, Shape{3, 4});
auto pad_a = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
        auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
        FAIL() << "Wrong padding below rank not detected";
    }
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Shape of padding below must be of rank 1");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
try
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
auto pad_b = make_shared<op::Parameter>(element::i64, Shape{3});
auto pad_a = make_shared<op::Parameter>(
element::i64, PartialShape{Dimension::dynamic(), Dimension::dynamic()});
        auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
        FAIL() << "Wrong padding above rank not detected";
    }
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Shape of padding above must be of rank 1");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
try
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
auto pad_b = make_shared<op::Parameter>(element::i64, Shape{3});
auto pad_a = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
        auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
        FAIL() << "Arg and padding below rank mismatch not detected";
    }
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Arg and padding below ranks mismatch");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
try
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
auto pad_b = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto pad_a = make_shared<op::Parameter>(element::i64, Shape{3});
        auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
        FAIL() << "Arg and padding above rank mismatch not detected";
    }
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Arg and padding above ranks mismatch");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
try
{
auto arg = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto pad_b = make_shared<op::Parameter>(element::i64, Shape{4});
auto pad_a = make_shared<op::Parameter>(element::i64, Shape{3});
        auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
        FAIL() << "Padding below and above rank mismatch not detected";
    }
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Padding below and above ranks mismatch");
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, dyn_pad_output_ranks_arg_static_ok)
{
auto pad_v = make_shared<op::Parameter>(element::f32, Shape{});
auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
auto pad_b = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto pad_a = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
EXPECT_EQ(dyn_pad->get_output_element_type(0), element::f32);
EXPECT_TRUE(dyn_pad->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
}
TEST(type_prop, dyn_pad_output_ranks_arg_dynamic_ok)
{
auto pad_v = make_shared<op::Parameter>(element::f32, Shape{});
auto arg = make_shared<op::Parameter>(
element::f32, PartialShape{2, Dimension::dynamic(), 4, Dimension::dynamic()});
auto pad_b = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto pad_a = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
EXPECT_EQ(dyn_pad->get_output_element_type(0), element::f32);
EXPECT_TRUE(dyn_pad->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
}
TEST(type_prop, dyn_pad_output_ranks_pad_static_ok)
{
auto pad_v = make_shared<op::Parameter>(element::f32, Shape{});
auto arg = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto pad_b = make_shared<op::Parameter>(element::i64, Shape{3});
auto pad_a = make_shared<op::Parameter>(element::i64, Shape{3});
auto dyn_pad = make_shared<op::DynPad>(arg, pad_b, pad_a, pad_v);
EXPECT_EQ(dyn_pad->get_output_element_type(0), element::f32);
EXPECT_TRUE(dyn_pad->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(3)));
}